author    Linus Torvalds  2018-01-31 23:31:10 +0100
committer Linus Torvalds  2018-01-31 23:31:10 +0100
commit    b2fe5fa68642860e7de76167c3111623aa0d5de1 (patch)
tree      b7f9b89b7039ecefbc35fe3c8e73a6ff972641dd /drivers/net/ethernet
parent    Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert... (diff)
parent    tls: Add support for encryption using async offload accelerator (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Significantly shrink the core networking routing structures. Result of http://vger.kernel.org/~davem/seoul2017_netdev_keynote.pdf

 2) Add netdevsim driver for testing various offloads, from Jakub Kicinski.

 3) Support cross-chip FDB operations in DSA, from Vivien Didelot.

 4) Add a 2nd listener hash table for TCP, similar to what was done for UDP. From Martin KaFai Lau.

 5) Add eBPF based queue selection to tun, from Jason Wang.

 6) Lockless qdisc support, from John Fastabend.

 7) SCTP stream interleave support, from Xin Long.

 8) Smoother TCP receive autotuning, from Eric Dumazet.

 9) Lots of erspan tunneling enhancements, from William Tu.

10) Add true function call support to BPF, from Alexei Starovoitov.

11) Add explicit support for GRO HW offloading, from Michael Chan.

12) Support extack generation in more netlink subsystems. From Alexander Aring, Quentin Monnet, and Jakub Kicinski.

13) Add 1000BaseX, flow control, and EEE support to mvneta driver. From Russell King.

14) Add flow table abstraction to netfilter, from Pablo Neira Ayuso.

15) Many improvements and simplifications to the NFP driver bpf JIT, from Jakub Kicinski.

16) Support for ipv6 non-equal cost multipath routing, from Ido Schimmel.

17) Add resource abstraction to devlink, from Arkadi Sharshevsky.

18) Packet scheduler classifier shared filter block support, from Jiri Pirko.

19) Avoid locking in act_csum, from Davide Caratti.

20) devinet_ioctl() simplifications from Al Viro.

21) More TCP bpf improvements from Lawrence Brakmo.

22) Add support for onlink ipv6 route flag, similar to ipv4, from David Ahern.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1925 commits)
  tls: Add support for encryption using async offload accelerator
  ip6mr: fix stale iterator
  net/sched: kconfig: Remove blank help texts
  openvswitch: meter: Use 64-bit arithmetic instead of 32-bit
  tcp_nv: fix potential integer overflow in tcpnv_acked
  r8169: fix RTL8168EP take too long to complete driver initialization.
  qmi_wwan: Add support for Quectel EP06
  rtnetlink: enable IFLA_IF_NETNSID for RTM_NEWLINK
  ipmr: Fix ptrdiff_t print formatting
  ibmvnic: Wait for device response when changing MAC
  qlcnic: fix deadlock bug
  tcp: release sk_frag.page in tcp_disconnect
  ipv4: Get the address of interface correctly.
  net_sched: gen_estimator: fix lockdep splat
  net: macb: Handle HRESP error
  net/mlx5e: IPoIB, Fix copy-paste bug in flow steering refactoring
  ipv6: addrconf: break critical section in addrconf_verify_rtnl()
  ipv6: change route cache aging logic
  i40e/i40evf: Update DESC_NEEDED value to reflect larger value
  bnxt_en: cleanup DIM work on device shutdown
  ...
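Item 10 above is easiest to see from a program author's side. Below is a minimal, hypothetical XDP sketch — not code from this merge, all names illustrative — of what bpf-to-bpf function calls permit: a plain noinline helper where earlier kernels effectively required __always_inline.

    /* Hypothetical sketch of bpf-to-bpf calls (item 10). Built with
     * clang -O2 -target bpf; the verifier accepts the real call into
     * frame_too_short() instead of requiring it to be inlined. */
    #include <linux/bpf.h>

    static __attribute__((noinline)) int frame_too_short(void *data, void *data_end)
    {
    	/* anything shorter than an Ethernet header (14 bytes) is dropped */
    	return data + 14 > data_end;
    }

    __attribute__((section("xdp"), used))
    int xdp_min_len(struct xdp_md *ctx)
    {
    	void *data = (void *)(long)ctx->data;
    	void *data_end = (void *)(long)ctx->data_end;

    	return frame_too_short(data, data_end) ? XDP_DROP : XDP_PASS;
    }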
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/Makefile | 2
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 14
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_eth_com.c | 11
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_eth_com.h | 2
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 68
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 6
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 24
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/Makefile | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 8
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_common.h | 39
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 79
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 11
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 110
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 290
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 62
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h | 45
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 413
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h | 26
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 5
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 472
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h | 18
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 65
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 506
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 28
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 64
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 1326
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 544
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 1521
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 346
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 133
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 184
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/ver.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 27
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/Makefile | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 814
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 89
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 32
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 11936
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 221
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 22
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 33
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 14
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 170
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 758
-rw-r--r-- drivers/net/ethernet/cavium/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/cavium/Makefile | 1
-rw-r--r-- drivers/net/ethernet/cavium/common/Makefile | 1
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.c | 353
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.h | 70
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 7
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 36
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 58
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 297
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 41
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 6
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 31
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/Makefile | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c | 24
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 156
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 1173
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 18
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c | 82
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h | 43
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 46
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 117
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 304
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 107
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 140
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 605
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 21
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 221
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 269
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 45
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 164
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 35
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 45
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 38
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 5
-rw-r--r-- drivers/net/ethernet/cortina/Kconfig | 23
-rw-r--r-- drivers/net/ethernet/cortina/Makefile | 4
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 2593
-rw-r--r-- drivers/net/ethernet/cortina/gemini.h | 958
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 28
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 28
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 109
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/Makefile | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 88
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 32
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c) | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c) | 405
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h) | 28
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c) | 347
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 106
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1588
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 129
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 418
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 64
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 12
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 342
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 248
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1505
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 164
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 181
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 20
-rw-r--r-- drivers/net/ethernet/ibm/emac/emac.h | 13
-rw-r--r-- drivers/net/ethernet/ibm/emac/phy.c | 10
-rw-r--r-- drivers/net/ethernet/ibm/emac/rgmii.c | 43
-rw-r--r-- drivers/net/ethernet/ibm/emac/zmii.c | 38
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 36
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 14
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 54
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 67
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 17
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 30
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 36
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 74
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 192
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 220
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 143
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 51
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_status.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 84
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 54
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 15
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 30
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 31
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 10
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_status.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 66
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 54
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 28
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 228
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 83
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 32
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 72
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/Makefile | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 46
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 121
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 22
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 56
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 941
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h | 93
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 76
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 589
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 63
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 61
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 21
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 16
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 372
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 34
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 687
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 253
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 35
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 85
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 110
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 291
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 154
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 347
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 282
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 548
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 147
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 52
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 216
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 150
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 214
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 92
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 255
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/item.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 98
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 602
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 101
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 359
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 81
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 551
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 280
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 13
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 453
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/fw.h | 157
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 988
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.c | 248
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.h | 207
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 198
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 159
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 79
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 75
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 71
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.c | 156
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/match.c | 127
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 57
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.c | 14
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.h | 84
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_asm.c | 95
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 28
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 12
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.h | 29
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 22
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 111
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c | 135
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 210
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c | 811
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 109
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 76
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 89
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 62
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c | 38
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c | 10
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 72
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 4
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 94
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 49
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 1167
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 10603
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 68
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 1091
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 58
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 33
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 119
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.h | 33
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 190
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 47
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 33
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 18
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.h | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 7
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 64
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 70
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h | 32
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 17
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 309
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h | 12
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 7
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 133
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 3
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 26
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 363
-rw-r--r-- drivers/net/ethernet/sfc/ef10_regs.h | 46
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 66
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 9
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 26
-rw-r--r-- drivers/net/ethernet/sfc/io.h | 19
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 2453
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 168
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 33
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 28
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 370
-rw-r--r-- drivers/net/ethernet/sfc/siena.c | 12
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 21
-rw-r--r-- drivers/net/ethernet/socionext/Kconfig | 34
-rw-r--r-- drivers/net/ethernet/socionext/Makefile | 6
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 1777
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 1736
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 113
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 61
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-common.c | 24
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 96
-rw-r--r-- drivers/net/ethernet/ti/cpsw.h | 23
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 109
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.h | 1
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 20
-rw-r--r-- drivers/net/ethernet/ti/netcp_ethss.c | 6
409 files changed, 51171 insertions, 22059 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c60421339a98..b6cf4b6962f5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -42,6 +42,7 @@ source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -170,6 +171,7 @@ source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 39f6273358ed..3cdf01e96e0b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
obj-$(CONFIG_DNET) += dnet.o
@@ -82,6 +83,7 @@ obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index a1a52eb53b14..8f71b79b4949 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1436,13 +1436,13 @@ static int ace_init(struct net_device *dev)
ace_set_txprd(regs, ap, 0);
writel(0, &regs->RxRetCsm);
- /*
- * Enable DMA engine now.
- * If we do this sooner, Mckinley box pukes.
- * I assume it's because Tigon II DMA engine wants to check
- * *something* even before the CPU is started.
- */
- writel(1, &regs->AssistState); /* enable DMA */
+ /*
+ * Enable DMA engine now.
+ * If we do this sooner, Mckinley box pukes.
+ * I assume it's because Tigon II DMA engine wants to check
+ * *something* even before the CPU is started.
+ */
+ writel(1, &regs->AssistState); /* enable DMA */
/*
* Start the NIC CPU
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index b11e573ad57a..ea149c134e15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -504,3 +504,14 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
return 0;
}
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (cdesc)
+ return false;
+ else
+ return true;
+}
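For reference, the new ena_com_cq_empty() helper just peeks at the head of the completion queue; a condensed, functionally identical restatement (illustrative only, not what the patch ships) is:

    bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
    {
    	/* empty exactly when no Rx completion descriptor is pending */
    	return !ena_com_get_next_rx_cdesc(io_cq);
    }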
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index bb53c3a4f8e9..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index fbe21a817bd8..6975150d144e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -161,6 +161,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->per_napi_packets = 0;
ring->per_napi_bytes = 0;
ring->cpu = 0;
+ ring->first_interrupt = false;
+ ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
@@ -1277,6 +1279,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
+ ena_napi->tx_ring->first_interrupt = true;
+ ena_napi->rx_ring->first_interrupt = true;
+
napi_schedule_irqoff(&ena_napi->napi);
return IRQ_HANDLED;
@@ -2663,8 +2668,32 @@ static void ena_fw_reset_device(struct work_struct *work)
rtnl_unlock();
}
-static int check_missing_comp_in_queue(struct ena_adapter *adapter,
- struct ena_ring *tx_ring)
+static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+ struct ena_ring *rx_ring)
+{
+ if (likely(rx_ring->first_interrupt))
+ return 0;
+
+ if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
+ return 0;
+
+ rx_ring->no_interrupt_event_cnt++;
+
+ if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
+ netif_err(adapter, rx_err, adapter->netdev,
+ "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+ rx_ring->qid);
+ adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+ struct ena_ring *tx_ring)
{
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
@@ -2674,8 +2703,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
- if (unlikely(last_jiffies &&
- time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
+
+ if (last_jiffies == 0)
+ /* no pending Tx at this location */
+ continue;
+
+ if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))) {
+ /* If after graceful period interrupt is still not
+ * received, we schedule a reset
+ */
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+ tx_ring->qid);
+ adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ return -EIO;
+ }
+
+ if (unlikely(time_is_before_jiffies(last_jiffies +
+ adapter->missing_tx_completion_to))) {
if (!tx_buf->print_once)
netif_notice(adapter, tx_err, adapter->netdev,
"Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2704,9 +2752,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
return rc;
}
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
int i, budget, rc;
/* Make sure the driver doesn't turn the device in other process */
@@ -2725,8 +2774,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
tx_ring = &adapter->tx_ring[i];
+ rx_ring = &adapter->rx_ring[i];
+
+ rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ if (unlikely(rc))
+ return;
- rc = check_missing_comp_in_queue(adapter, tx_ring);
+ rc = check_for_rx_interrupt_queue(adapter, rx_ring);
if (unlikely(rc))
return;
@@ -2885,7 +2939,7 @@ static void ena_timer_service(struct timer_list *t)
check_for_admin_com_state(adapter);
- check_for_missing_tx_completions(adapter);
+ check_for_missing_completions(adapter);
check_for_empty_rx_ring(adapter);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 3bbc003871de..f1972b5ab650 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,7 +44,7 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 3
+#define DRV_MODULE_VER_MINOR 5
#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
@@ -122,6 +122,7 @@
* We wait for 6 sec just to be on the safe side.
*/
#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
+#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
@@ -236,6 +237,9 @@ struct ena_ring {
/* The maximum header length the device can handle */
u8 tx_max_header_size;
+ bool first_interrupt;
+ u16 no_interrupt_event_cnt;
+
/* cpu for TPH */
int cpu;
/* number of tx/rx_buffer_info's entries */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 9aec43c5bba8..48ca97fbe7bc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
+
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a74a8fbad53a..7a3ebfd236f5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
unsigned char buffer[128];
- unsigned int i, j;
+ unsigned int i;
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
@@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
+ for (i = 0; i < skb->len; i += 32) {
+ unsigned int len = min(skb->len - i, 32U);
+
+ hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+ buffer, sizeof(buffer), false);
+ netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
}
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
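hex_dump_to_buffer() used above is the generic helper from lib/hexdump.c; the arguments in this hunk mean 32 bytes per output row, grouped one byte at a time, with no trailing ASCII column. A minimal standalone sketch of the same pattern (hypothetical function, not part of the patch):

    #include <linux/kernel.h>
    #include <linux/printk.h>

    /* Format an arbitrary buffer 32 bytes per line, one byte per group,
     * no ASCII column -- the same parameters the xgbe hunk above uses. */
    static void sketch_hex_dump(const u8 *data, unsigned int len)
    {
    	char line[128];	/* 32 bytes * "xx " fits comfortably */
    	unsigned int i;

    	for (i = 0; i < len; i += 32) {
    		hex_dump_to_buffer(&data[i], min(len - i, 32U), 32, 1,
    				   line, sizeof(line), false);
    		pr_debug(" %#06x: %s\n", i, line);
    	}
    }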
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index e4ae696920ef..686f6d8c9e79 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -39,4 +39,5 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
+ hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 105fdb958cef..0b49f1aeebd3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -65,7 +65,13 @@
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
-#define AQ_CFG_FC_MODE 3U
+#define AQ_NIC_FC_OFF 0U
+#define AQ_NIC_FC_TX 1U
+#define AQ_NIC_FC_RX 2U
+#define AQ_NIC_FC_FULL 3U
+#define AQ_NIC_FC_AUTO 4U
+
+#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 9eb5e222a234..d52b088ff8f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -16,8 +16,45 @@
#include <linux/pci.h>
#include "ver.h"
-#include "aq_nic.h"
#include "aq_cfg.h"
#include "aq_utils.h"
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+
+#define AQ_DEVICE_ID_0001 0x0001
+#define AQ_DEVICE_ID_D100 0xD100
+#define AQ_DEVICE_ID_D107 0xD107
+#define AQ_DEVICE_ID_D108 0xD108
+#define AQ_DEVICE_ID_D109 0xD109
+
+#define AQ_DEVICE_ID_AQC100 0x00B1
+#define AQ_DEVICE_ID_AQC107 0x07B1
+#define AQ_DEVICE_ID_AQC108 0x08B1
+#define AQ_DEVICE_ID_AQC109 0x09B1
+#define AQ_DEVICE_ID_AQC111 0x11B1
+#define AQ_DEVICE_ID_AQC112 0x12B1
+
+#define AQ_DEVICE_ID_AQC100S 0x80B1
+#define AQ_DEVICE_ID_AQC107S 0x87B1
+#define AQ_DEVICE_ID_AQC108S 0x88B1
+#define AQ_DEVICE_ID_AQC109S 0x89B1
+#define AQ_DEVICE_ID_AQC111S 0x91B1
+#define AQ_DEVICE_ID_AQC112S 0x92B1
+
+#define AQ_DEVICE_ID_AQC111E 0x51B1
+#define AQ_DEVICE_ID_AQC112E 0x52B1
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+
+#define AQ_HWREV_ANY 0
+#define AQ_HWREV_1 1
+#define AQ_HWREV_2 2
+
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2GS BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+
#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index b3825de6cdfb..a2d416b24ffc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -7,7 +7,7 @@
* version 2, as published by the Free Software Foundation.
*/
-/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
* functions.
*/
@@ -15,12 +15,15 @@
#define AQ_HW_H
#include "aq_common.h"
+#include "aq_rss.h"
+#include "hw_atl/hw_atl_utils.h"
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
u64 link_speed_msk;
unsigned int hw_priv_flags;
+ u32 media_type;
u32 rxds;
u32 txds;
u32 txhwb_alignment;
@@ -28,7 +31,7 @@ struct aq_hw_caps_s {
u32 vecs;
u32 mtu;
u32 mac_regs_count;
- u8 ports;
+ u32 hw_alive_check_addr;
u8 msix_irqs;
u8 tcs;
u8 rxd_alignment;
@@ -39,7 +42,6 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
- u32 fw_ver_expected;
};
struct aq_hw_link_status_s {
@@ -86,30 +88,43 @@ struct aq_stats_s {
#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+ AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+ AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+ AQ_NIC_LINK_DOWN)
+
+#define AQ_HW_MEDIA_TYPE_TP 1U
+#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+
struct aq_hw_s {
- struct aq_obj_s header;
+ atomic_t flags;
+ u8 rbl_enabled:1;
struct aq_nic_cfg_s *aq_nic_cfg;
- struct aq_pci_func_s *aq_pci_func;
+ const struct aq_fw_ops *aq_fw_ops;
void __iomem *mmio;
- unsigned int not_ff_addr;
struct aq_hw_link_status_s aq_link_status;
+ struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct aq_stats_s curr_stats;
+ u64 speed;
+ u32 itr_tx;
+ u32 itr_rx;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ atomic_t dpc;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 rpc_tid;
+ struct hw_aq_atl_utils_fw_rpc rpc;
};
struct aq_ring_s;
struct aq_ring_param_s;
-struct aq_nic_cfg_s;
struct sk_buff;
struct aq_hw_ops {
- struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
- unsigned int port, struct aq_hw_ops *ops);
-
- void (*destroy)(struct aq_hw_s *self);
-
- int (*get_hw_caps)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- unsigned short device,
- unsigned short subsystem_device);
int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
unsigned int frags);
@@ -123,20 +138,11 @@ struct aq_hw_ops {
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
- int (*hw_get_mac_permanent)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- u8 *mac);
-
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
- int (*hw_get_link_status)(struct aq_hw_s *self);
-
- int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
-
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -184,9 +190,8 @@ struct aq_hw_ops {
struct aq_rss_parameters *rss_params);
int (*hw_get_regs)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
-
- int (*hw_update_stats)(struct aq_hw_s *self);
+ const struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
@@ -197,4 +202,20 @@ struct aq_hw_ops {
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
+struct aq_fw_ops {
+ int (*init)(struct aq_hw_s *self);
+
+ int (*reset)(struct aq_hw_s *self);
+
+ int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
+
+ int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+ int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+
+ int (*update_link_status)(struct aq_hw_s *self);
+
+ int (*update_stats)(struct aq_hw_s *self);
+};
+
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 5f13465995f6..d526c4f19d34 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -13,6 +13,7 @@
#include "aq_hw_utils.h"
#include "aq_hw.h"
+#include "aq_nic.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -39,8 +40,10 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
u32 value = readl(hw->mmio + reg);
- if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
- aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+ if ((~0U) == value &&
+ (~0U) == readl(hw->mmio +
+ hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr))
+ aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
}
@@ -54,11 +57,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
err = -EIO;
goto err_exit;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index 03b72ddbffb9..dc88a1221f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,9 @@ do { \
} \
} while (0)
+#define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+#define aq_pr_trace(...) pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+
struct aq_hw_s;
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 5d6c40d86775..ba5fe8c4125d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,49 +13,39 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
-#include "hw_atl/hw_atl_a0.h"
-#include "hw_atl/hw_atl_b0.h"
#include <linux/netdevice.h>
#include <linux/module.h>
-static const struct pci_device_id aq_pci_tbl[] = {
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
-
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
-static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+static const struct net_device_ops aq_ndev_ops;
+
+struct net_device *aq_ndev_alloc(void)
{
- struct aq_hw_ops *ops = NULL;
+ struct net_device *ndev = NULL;
+ struct aq_nic_s *aq_nic = NULL;
+
+ ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+ if (!ndev)
+ return NULL;
- ops = hw_atl_a0_get_ops_by_id(pdev);
- if (!ops)
- ops = hw_atl_b0_get_ops_by_id(pdev);
+ aq_nic = netdev_priv(ndev);
+ aq_nic->ndev = ndev;
+ ndev->netdev_ops = &aq_ndev_ops;
+ ndev->ethtool_ops = &aq_ethtool_ops;
- return ops;
+ return ndev;
}
static int aq_ndev_open(struct net_device *ndev)
{
- struct aq_nic_s *aq_nic = NULL;
int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
- aq_nic = aq_nic_alloc_hot(ndev);
- if (!aq_nic) {
- err = -ENOMEM;
- goto err_exit;
- }
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
@@ -78,7 +68,6 @@ static int aq_ndev_close(struct net_device *ndev)
if (err < 0)
goto err_exit;
aq_nic_deinit(aq_nic);
- aq_nic_free_hot_resources(aq_nic);
err_exit:
return err;
@@ -150,15 +139,13 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
if (err < 0)
- goto err_exit;
+ return;
if (netdev_mc_count(ndev)) {
err = aq_nic_set_multicast_list(aq_nic, ndev);
if (err < 0)
- goto err_exit;
+ return;
}
-
-err_exit:;
}
static const struct net_device_ops aq_ndev_ops = {
@@ -170,66 +157,3 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features
};
-
-static int aq_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
-{
- struct aq_hw_ops *aq_hw_ops = NULL;
- struct aq_pci_func_s *aq_pci_func = NULL;
- int err = 0;
-
- err = pci_enable_device(pdev);
- if (err < 0)
- goto err_exit;
- aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
- aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
- &aq_ndev_ops, &aq_ethtool_ops);
- if (!aq_pci_func) {
- err = -ENOMEM;
- goto err_exit;
- }
- err = aq_pci_func_init(aq_pci_func);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- if (err < 0) {
- if (aq_pci_func)
- aq_pci_func_free(aq_pci_func);
- }
- return err;
-}
-
-static void aq_pci_remove(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- aq_pci_func_deinit(aq_pci_func);
- aq_pci_func_free(aq_pci_func);
-}
-
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static int aq_pci_resume(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
- pm_message_t pm_msg = PMSG_RESTORE;
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static struct pci_driver aq_pci_ops = {
- .name = AQ_CFG_DRV_NAME,
- .id_table = aq_pci_tbl,
- .probe = aq_pci_probe,
- .remove = aq_pci_remove,
- .suspend = aq_pci_suspend,
- .resume = aq_pci_resume,
-};
-
-module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index 9748e7e575e0..ce92152eb43e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -14,4 +14,6 @@
#include "aq_common.h"
+struct net_device *aq_ndev_alloc(void);
+
#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 75a894a9251c..ebbaf63eaf47 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,7 +14,6 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
-#include "aq_nic_internal.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -61,19 +60,13 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
-/* Fills aq_nic_cfg with valid defaults */
-static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
+/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
+void aq_nic_cfg_start(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
- cfg->aq_hw_caps = &self->aq_hw_caps;
-
- cfg->vecs = AQ_CFG_VECS_DEF;
cfg->tcs = AQ_CFG_TCS_DEF;
- cfg->rxds = AQ_CFG_RXDS_DEF;
- cfg->txds = AQ_CFG_TXDS_DEF;
-
cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
cfg->itr = aq_itr;
@@ -94,19 +87,13 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
cfg->vlan_id = 0U;
aq_nic_rss_init(self, cfg->num_rss_queues);
-}
-
-/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
-int aq_nic_cfg_start(struct aq_nic_s *self)
-{
- struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
/*descriptors */
- cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
- cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
+ cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
/*rss rings */
- cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
+ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
@@ -120,23 +107,22 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
- cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
+ cfg->irq_type = aq_pci_func_get_irq_type(self);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
- (self->aq_hw_caps.vecs == 1U) ||
+ (cfg->aq_hw_caps->vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
cfg->vecs = 1U;
}
- cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
- cfg->hw_features = self->aq_hw_caps.hw_features;
- return 0;
+ cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
+ cfg->hw_features = cfg->aq_hw_caps->hw_features;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
- int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+ int err = self->aq_fw_ops->update_link_status(self->aq_hw);
if (err)
return err;
@@ -150,9 +136,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
self->link_status = self->aq_hw->aq_link_status;
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
+ aq_utils_obj_set(&self->flags,
AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
+ aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
netif_tx_wake_all_queues(self->ndev);
@@ -160,7 +146,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
return 0;
}
@@ -171,15 +157,15 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
int err = 0;
- if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
err = aq_nic_update_link_status(self);
if (err)
goto err_exit;
- if (self->aq_hw_ops.hw_update_stats)
- self->aq_hw_ops.hw_update_stats(self->aq_hw);
+ if (self->aq_fw_ops->update_stats)
+ self->aq_fw_ops->update_stats(self->aq_hw);
aq_nic_update_ndev_stats(self);
@@ -205,60 +191,6 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
-static struct net_device *aq_nic_ndev_alloc(void)
-{
- return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
-}
-
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct pci_dev *pdev,
- struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- const struct aq_hw_ops *aq_hw_ops)
-{
- struct net_device *ndev = NULL;
- struct aq_nic_s *self = NULL;
- int err = 0;
-
- ndev = aq_nic_ndev_alloc();
- if (!ndev) {
- err = -ENOMEM;
- goto err_exit;
- }
-
- self = netdev_priv(ndev);
-
- ndev->netdev_ops = ndev_ops;
- ndev->ethtool_ops = et_ops;
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- ndev->if_port = port;
- self->ndev = ndev;
-
- self->aq_pci_func = aq_pci_func;
-
- self->aq_hw_ops = *aq_hw_ops;
- self->port = (u8)port;
-
- self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
- &self->aq_hw_ops);
- err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
- pdev->device, pdev->subsystem_device);
- if (err < 0)
- goto err_exit;
-
- aq_nic_cfg_init_defaults(self);
-
-err_exit:
- if (err < 0) {
- aq_nic_free_hot_resources(self);
- self = NULL;
- }
- return self;
-}
-
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
@@ -267,10 +199,14 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
err = -EINVAL;
goto err_exit;
}
- err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
- self->aq_nic_cfg.aq_hw_caps,
+
+ err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+ if (err)
+ goto err_exit;
+
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
- if (err < 0)
+ if (err)
goto err_exit;
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -281,83 +217,39 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
}
#endif
+ for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
+ self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] =
+ aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
+ if (!self->aq_vec[self->aq_vecs]) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
err = register_netdev(self->ndev);
- if (err < 0)
+ if (err)
goto err_exit;
err_exit:
return err;
}
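
[editor's note] The vector-allocation loop added above moved here from the removed `aq_nic_alloc_hot()`; on a failed allocation it bails out with `-ENOMEM` and relies on the caller's error path (ultimately `aq_nic_free_vectors()`, renamed below) to release the entries already allocated. A standalone sketch of that allocate-then-unwind shape, with hypothetical types:

```c
#include <errno.h>
#include <stdlib.h>

#define VECS_MAX 8

struct vec { unsigned int idx; };

static struct vec *vec_alloc(unsigned int idx)
{
	struct vec *v = malloc(sizeof(*v));

	if (v)
		v->idx = idx;
	return v;
}

static void free_vectors(struct vec *vecs[], unsigned int n)
{
	while (n--)
		free(vecs[n]);
}

static int alloc_vectors(struct vec *vecs[], unsigned int want,
			 unsigned int *got)
{
	for (*got = 0; *got < want; ++*got) {
		vecs[*got] = vec_alloc(*got);
		if (!vecs[*got])
			return -ENOMEM;	/* caller unwinds what succeeded */
	}
	return 0;
}

int main(void)
{
	struct vec *vecs[VECS_MAX];
	unsigned int got = 0;
	int err = alloc_vectors(vecs, 4, &got);

	free_vectors(vecs, got);	/* correct for full or partial success */
	return err ? 1 : 0;
}
```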
-int aq_nic_ndev_init(struct aq_nic_s *self)
+void aq_nic_ndev_init(struct aq_nic_s *self)
{
- struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
- self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
-
- return 0;
-}
-
-void aq_nic_ndev_free(struct aq_nic_s *self)
-{
- if (!self->ndev)
- goto err_exit;
-
- if (self->ndev->reg_state == NETREG_REGISTERED)
- unregister_netdev(self->ndev);
-
- if (self->aq_hw)
- self->aq_hw_ops.destroy(self->aq_hw);
-
- free_netdev(self->ndev);
-
-err_exit:;
-}
-
-struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
-{
- struct aq_nic_s *self = NULL;
- int err = 0;
-
- if (!ndev) {
- err = -EINVAL;
- goto err_exit;
- }
- self = netdev_priv(ndev);
-
- if (!self) {
- err = -EINVAL;
- goto err_exit;
- }
- if (netif_running(ndev))
- netif_tx_disable(ndev);
- netif_carrier_off(self->ndev);
-
- for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
- self->aq_vecs++) {
- self->aq_vec[self->aq_vecs] =
- aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
- if (!self->aq_vec[self->aq_vecs]) {
- err = -ENOMEM;
- goto err_exit;
- }
- }
+ self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
-err_exit:
- if (err < 0) {
- aq_nic_free_hot_resources(self);
- self = NULL;
- }
- return self;
}
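
[editor's note] One behavioural detail in the `aq_nic_ndev_init()` hunk: `ndev->mtu` subtracts only the Ethernet header (`ETH_HLEN`, 14 bytes), while `ndev->max_mtu` also subtracts the 4-byte FCS (`ETH_FCS_LEN`), because the capability value describes a full on-wire frame. A quick check of the arithmetic (the 9014-byte jumbo cap is illustrative, not taken from the headers):

```c
#include <stdio.h>

#define ETH_HLEN	14	/* dst mac + src mac + ethertype */
#define ETH_FCS_LEN	 4	/* frame check sequence */

int main(void)
{
	unsigned int caps_mtu = 9014;	/* hypothetical jumbo frame size */

	/* max_mtu = caps->mtu - ETH_FCS_LEN - ETH_HLEN */
	printf("max_mtu = %u\n", caps_mtu - ETH_FCS_LEN - ETH_HLEN); /* 8996 */
	return 0;
}
```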
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
@@ -366,11 +258,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
self->aq_ring_tx[idx] = ring;
}
-struct device *aq_nic_get_dev(struct aq_nic_s *self)
-{
- return self->ndev->dev.parent;
-}
-
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
return self->ndev;
@@ -383,18 +270,20 @@ int aq_nic_init(struct aq_nic_s *self)
unsigned int i = 0U;
self->power_state = AQ_HW_POWER_STATE_D0;
- err = self->aq_hw_ops.hw_reset(self->aq_hw);
+ err = self->aq_hw_ops->hw_reset(self->aq_hw);
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
- aq_nic_get_ndev(self)->dev_addr);
+ err = self->aq_hw_ops->hw_init(self->aq_hw,
+ aq_nic_get_ndev(self)->dev_addr);
if (err < 0)
goto err_exit;
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
+ aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+
+ netif_carrier_off(self->ndev);
err_exit:
return err;
@@ -406,13 +295,13 @@ int aq_nic_start(struct aq_nic_s *self)
int err = 0;
unsigned int i = 0U;
- err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
self->packet_filter);
if (err < 0)
goto err_exit;
@@ -424,7 +313,7 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
- err = self->aq_hw_ops.hw_start(self->aq_hw);
+ err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -442,14 +331,14 @@ int aq_nic_start(struct aq_nic_s *self)
} else {
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
+ err = aq_pci_func_alloc_irq(self, i,
self->ndev->name, aq_vec,
- aq_vec_get_affinity_mask(aq_vec));
+ aq_vec_get_affinity_mask(aq_vec));
if (err < 0)
goto err_exit;
}
- err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
+ err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
AQ_CFG_IRQ_MASK);
if (err < 0)
goto err_exit;
@@ -634,9 +523,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
if (likely(frags)) {
- err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
- ring,
- frags);
+ err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+ ring, frags);
if (err >= 0) {
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
@@ -651,14 +539,14 @@ err_exit:
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
- return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+ return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
int err = 0;
- err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
if (err < 0)
goto err_exit;
@@ -690,11 +578,11 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
* multicast mask
*/
self->packet_filter |= IFF_ALLMULTI;
- self->aq_hw->aq_nic_cfg->mc_list_count = 0;
- return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
- self->packet_filter);
+ self->aq_nic_cfg.mc_list_count = 0;
+ return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
} else {
- return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
}
@@ -709,7 +597,7 @@ int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
- return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+ return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}
unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
@@ -724,8 +612,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
regs->version = 1;
- err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
- &self->aq_hw_caps, regs_buff);
+ err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
+ self->aq_nic_cfg.aq_hw_caps,
+ regs_buff);
if (err < 0)
goto err_exit;
@@ -735,7 +624,7 @@ err_exit:
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
- return self->aq_hw_caps.mac_regs_count;
+ return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
@@ -743,7 +632,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
unsigned int i = 0U;
unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
- struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+ struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
if (!stats)
goto err_exit;
@@ -774,7 +663,6 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
i++;
data += i;
- count = 0U;
for (i = 0U, aq_vec = self->aq_vec[0];
aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
@@ -788,7 +676,7 @@ err_exit:;
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
struct net_device *ndev = self->ndev;
- struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+ struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
@@ -802,39 +690,46 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
struct ethtool_link_ksettings *cmd)
{
- cmd->base.port = PORT_TP;
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ cmd->base.port = PORT_FIBRE;
+ else
+ cmd->base.port = PORT_TP;
/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
cmd->base.duplex = DUPLEX_FULL;
cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
5000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
ethtool_link_ksettings_add_link_mode(cmd, supported,
2500baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
- if (self->aq_hw_caps.flow_control)
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control)
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
@@ -865,7 +760,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}
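
[editor's note] Every capability test in `aq_nic_get_link_ksettings()` repeats the `link_speed_msk & AQ_NIC_RATE_*` pattern; a table-driven loop is the usual way to express the same mapping once. A sketch — the bit values mirror the `AQ_NIC_RATE_*` defines removed from aq_nic.h below, the loop itself is illustrative:

```c
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define AQ_NIC_RATE_10G	 BIT(0)
#define AQ_NIC_RATE_5G	 BIT(1)
#define AQ_NIC_RATE_2GS	 BIT(3)
#define AQ_NIC_RATE_1G	 BIT(4)
#define AQ_NIC_RATE_100M BIT(5)

static const struct {
	unsigned int bit;
	const char *mode;
} rate_map[] = {
	{ AQ_NIC_RATE_10G,  "10000baseT/Full" },
	{ AQ_NIC_RATE_5G,   "5000baseT/Full"  },
	{ AQ_NIC_RATE_2GS,  "2500baseT/Full"  },
	{ AQ_NIC_RATE_1G,   "1000baseT/Full"  },
	{ AQ_NIC_RATE_100M, "100baseT/Full"   },
};

int main(void)
{
	/* e.g. an AQC108-style part: everything but 10G */
	unsigned int link_speed_msk = AQ_NIC_RATE_5G | AQ_NIC_RATE_2GS |
				      AQ_NIC_RATE_1G | AQ_NIC_RATE_100M;
	size_t i;

	for (i = 0; i < sizeof(rate_map) / sizeof(rate_map[0]); i++)
		if (link_speed_msk & rate_map[i].bit)
			printf("supported: %s\n", rate_map[i].mode);
	return 0;
}
```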
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
@@ -876,7 +774,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
int err = 0;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
- rate = self->aq_hw_caps.link_speed_msk;
+ rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
self->aq_nic_cfg.is_autoneg = true;
} else {
speed = cmd->base.speed;
@@ -907,7 +805,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
goto err_exit;
break;
}
- if (!(self->aq_hw_caps.link_speed_msk & rate)) {
+ if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
err = -1;
goto err_exit;
}
@@ -915,7 +813,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
self->aq_nic_cfg.is_autoneg = false;
}
- err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
+ err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
if (err < 0)
goto err_exit;
@@ -934,7 +832,7 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
u32 fw_version = 0U;
- self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
+ self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
return fw_version;
}
@@ -949,18 +847,18 @@ int aq_nic_stop(struct aq_nic_s *self)
del_timer_sync(&self->service_timer);
- self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
if (self->aq_nic_cfg.is_polling)
del_timer_sync(&self->polling_timer);
else
- aq_pci_func_free_irqs(self->aq_pci_func);
+ aq_pci_func_free_irqs(self);
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_stop(aq_vec);
- return self->aq_hw_ops.hw_stop(self->aq_hw);
+ return self->aq_hw_ops->hw_stop(self->aq_hw);
}
void aq_nic_deinit(struct aq_nic_s *self)
@@ -976,23 +874,23 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_vec_deinit(aq_vec);
if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_hw_ops.hw_deinit(self->aq_hw);
+ (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
} else {
- (void)self->aq_hw_ops.hw_set_power(self->aq_hw,
+ (void)self->aq_hw_ops->hw_set_power(self->aq_hw,
self->power_state);
}
err_exit:;
}
-void aq_nic_free_hot_resources(struct aq_nic_s *self)
+void aq_nic_free_vectors(struct aq_nic_s *self)
{
unsigned int i = 0U;
if (!self)
goto err_exit;
- for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ for (i = ARRAY_SIZE(self->aq_vec); i--;) {
if (self->aq_vec[i]) {
aq_vec_free(self->aq_vec[i]);
self->aq_vec[i] = NULL;
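
[editor's note] `AQ_DIMOF()` was a private alias for the kernel's `ARRAY_SIZE()` (its removal from aq_utils.h appears further down); switching to the standard macro is pure cleanup. For reference, the classic definition and the backwards-iteration idiom the driver uses, in a user-space check:

```c
#include <stdio.h>

/* Same shape as the kernel macro, minus its type-safety check. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	void *aq_vec[8] = { 0 };
	size_t i;

	/* Walk the whole array from the top down, as the driver does. */
	for (i = ARRAY_SIZE(aq_vec); i--;)
		aq_vec[i] = NULL;

	printf("%zu slots\n", ARRAY_SIZE(aq_vec));	/* 8 */
	return 0;
}
```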
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 3c9f8db03d5f..d16b0f1a95aa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -14,26 +14,15 @@
#include "aq_common.h"
#include "aq_rss.h"
+#include "aq_hw.h"
struct aq_ring_s;
-struct aq_pci_func_s;
struct aq_hw_ops;
-
-#define AQ_NIC_FC_OFF 0U
-#define AQ_NIC_FC_TX 1U
-#define AQ_NIC_FC_RX 2U
-#define AQ_NIC_FC_FULL 3U
-#define AQ_NIC_FC_AUTO 4U
-
-#define AQ_NIC_RATE_10G BIT(0)
-#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2GS BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
+struct aq_fw_s;
+struct aq_vec_s;
struct aq_nic_cfg_s {
- struct aq_hw_caps_s *aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps;
u64 hw_features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
@@ -44,7 +33,6 @@ struct aq_nic_cfg_s {
u16 tx_itr;
u32 num_rss_queues;
u32 mtu;
- u32 ucp_0x364;
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
@@ -69,20 +57,43 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct pci_dev *pdev,
- struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- const struct aq_hw_ops *aq_hw_ops);
-int aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s {
+ atomic_t flags;
+ struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_hw_s *aq_hw;
+ struct net_device *ndev;
+ unsigned int aq_vecs;
+ unsigned int packet_filter;
+ unsigned int power_state;
+ u8 port;
+ const struct aq_hw_ops *aq_hw_ops;
+ const struct aq_fw_ops *aq_fw_ops;
+ struct aq_nic_cfg_s aq_nic_cfg;
+ struct timer_list service_timer;
+ struct timer_list polling_timer;
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+ u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+
+ struct pci_dev *pdev;
+ unsigned int msix_entry_mask;
+};
+
+static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+ return self->ndev->dev.parent;
+}
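
[editor's note] With aq_nic_internal.h gone, `struct aq_nic_s` is an ordinary public struct living in the netdev's private area, and accessors like `aq_nic_get_dev()` collapse into trivial inlines. A sketch of the embed-in-private-area idiom behind `netdev_priv()`, with hypothetical names (the real helper also aligns the private area; that is omitted here):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct net_device {		/* stand-in for the kernel struct */
	char name[16];
	/* private data follows in the same allocation */
};

struct nic_priv {
	int port;
};

/* Simplified netdev_priv(): private data sits right after the netdev. */
static void *ndev_priv(struct net_device *ndev)
{
	return (char *)ndev + sizeof(*ndev);
}

static struct net_device *alloc_ndev(size_t priv_size)
{
	struct net_device *ndev = calloc(1, sizeof(*ndev) + priv_size);

	if (ndev)
		strcpy(ndev->name, "eth0");
	return ndev;
}

int main(void)
{
	struct net_device *ndev = alloc_ndev(sizeof(struct nic_priv));
	struct nic_priv *self;

	if (!ndev)
		return 1;
	self = ndev_priv(ndev);
	self->port = 0;
	printf("%s port %d\n", ndev->name, self->port);
	free(ndev);
	return 0;
}
```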
+
+void aq_nic_ndev_init(struct aq_nic_s *self);
struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
struct aq_ring_s *ring);
-struct device *aq_nic_get_dev(struct aq_nic_s *self);
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
-int aq_nic_cfg_start(struct aq_nic_s *self);
+void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
@@ -93,6 +104,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
+void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
deleted file mode 100644
index e7d2711dc165..000000000000
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-/* File aq_nic_internal.h: Definition of private object structure. */
-
-#ifndef AQ_NIC_INTERNAL_H
-#define AQ_NIC_INTERNAL_H
-
-struct aq_nic_s {
- struct aq_obj_s header;
- struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
- struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
- struct aq_hw_s *aq_hw;
- struct net_device *ndev;
- struct aq_pci_func_s *aq_pci_func;
- unsigned int aq_vecs;
- unsigned int packet_filter;
- unsigned int power_state;
- u8 port;
- struct aq_hw_ops aq_hw_ops;
- struct aq_hw_caps_s aq_hw_caps;
- struct aq_nic_cfg_s aq_nic_cfg;
- struct timer_list service_timer;
- struct timer_list polling_timer;
- struct aq_hw_link_status_s link_status;
- struct {
- u32 count;
- u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
- } mc_list;
-};
-
-#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
- AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
- AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
-
-#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
- AQ_NIC_LINK_DOWN)
-
-#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 58c29d04b186..22889fc158f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -9,181 +9,135 @@
/* File aq_pci_func.c: Definition of PCI functions. */
-#include "aq_pci_func.h"
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
-#include <linux/interrupt.h>
-
-struct aq_pci_func_s {
- struct pci_dev *pdev;
- struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
- void __iomem *mmio;
- void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
- resource_size_t mmio_pa;
- unsigned int msix_entry_mask;
- unsigned int ports;
- bool is_pci_enabled;
- bool is_regions;
- bool is_pci_using_dac;
- struct aq_hw_caps_s aq_hw_caps;
+#include "aq_pci_func.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+
+static const struct pci_device_id aq_pci_tbl[] = {
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },
+
+ {}
};
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops)
-{
- struct aq_pci_func_s *self = NULL;
- int err = 0;
- unsigned int port = 0U;
-
- if (!aq_hw_ops) {
- err = -EFAULT;
- goto err_exit;
- }
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self) {
- err = -ENOMEM;
- goto err_exit;
- }
-
- pci_set_drvdata(pdev, self);
- self->pdev = pdev;
+static const struct aq_board_revision_s hw_atl_boards[] = {
+ { AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
+ { AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
+ { AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },
+
+ { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
+ { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
+ { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
+ { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
+ { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
+ { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+ { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
+ { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
+};
- err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
- pdev->subsystem_device);
- if (err < 0)
- goto err_exit;
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
- self->ports = self->aq_hw_caps.ports;
+static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
+ const struct aq_hw_ops **ops,
+ const struct aq_hw_caps_s **caps)
+{
+ int i = 0;
- for (port = 0; port < self->ports; ++port) {
- struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
- pdev, self,
- port, aq_hw_ops);
+ if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
+ return -EINVAL;
- if (!aq_nic) {
- err = -ENOMEM;
- goto err_exit;
+ for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
+ if (hw_atl_boards[i].devid == pdev->device &&
+ (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
+ hw_atl_boards[i].revision == pdev->revision)) {
+ *ops = hw_atl_boards[i].ops;
+ *caps = hw_atl_boards[i].caps;
+ break;
}
- self->port[port] = aq_nic;
}
-err_exit:
- if (err < 0) {
- if (self)
- aq_pci_func_free(self);
- self = NULL;
- }
+ if (i == ARRAY_SIZE(hw_atl_boards))
+ return -EINVAL;
- (void)err;
- return self;
+ return 0;
}
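
[editor's note] `aq_pci_probe_get_hw_by_id()` replaces the old `get_hw_caps()` callback: instead of asking the hardware layer, probe matches `(devid, revision)` against the static `hw_atl_boards[]` table, with `AQ_HWREV_ANY` acting as a wildcard. A user-space model of the same lookup (0xd108 matches the real `AQ_DEVICE_ID_D108`; the third entry's id is hypothetical):

```c
#include <stdio.h>

#define HWREV_ANY 0xffffU

struct board {
	unsigned short devid;
	unsigned short revision;	/* HWREV_ANY matches all */
	const char *caps;		/* stands in for the ops/caps pointers */
};

static const struct board boards[] = {
	{ 0xd108, 1,         "a0/aqc108" },
	{ 0xd108, 2,         "b0/aqc108" },
	{ 0x08b1, HWREV_ANY, "b1/aqc108" },	/* hypothetical id */
};

static const char *lookup(unsigned short devid, unsigned short rev)
{
	size_t i;

	for (i = 0; i < sizeof(boards) / sizeof(boards[0]); i++)
		if (boards[i].devid == devid &&
		    (boards[i].revision == HWREV_ANY ||
		     boards[i].revision == rev))
			return boards[i].caps;
	return NULL;	/* the driver returns -EINVAL here */
}

int main(void)
{
	printf("%s\n", lookup(0xd108, 2));	/* b0/aqc108 */
	return 0;
}
```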
-int aq_pci_func_init(struct aq_pci_func_s *self)
+int aq_pci_func_init(struct pci_dev *pdev)
{
int err = 0;
- unsigned int bar = 0U;
- unsigned int port = 0U;
- unsigned int numvecs = 0U;
-
- err = pci_enable_device(self->pdev);
- if (err < 0)
- goto err_exit;
- self->is_pci_enabled = true;
-
- err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
- self->is_pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (err) {
- err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(self->pdev,
+ err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
- self->is_pci_using_dac = 0;
}
if (err != 0) {
err = -ENOSR;
goto err_exit;
}
- err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
+ err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
if (err < 0)
goto err_exit;
- self->is_regions = true;
+ pci_set_master(pdev);
- pci_set_master(self->pdev);
-
- for (bar = 0; bar < 4; ++bar) {
- if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
- resource_size_t reg_sz;
-
- self->mmio_pa = pci_resource_start(self->pdev, bar);
- if (self->mmio_pa == 0U) {
- err = -EIO;
- goto err_exit;
- }
-
- reg_sz = pci_resource_len(self->pdev, bar);
- if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
- err = -EIO;
- goto err_exit;
- }
-
- self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
- if (!self->mmio) {
- err = -EIO;
- goto err_exit;
- }
- break;
- }
- }
-
- numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
- numvecs = min(numvecs, num_online_cpus());
-
- /* enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
-
- if (err < 0) {
- err = pci_alloc_irq_vectors(self->pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
- if (err < 0)
- goto err_exit;
- }
-#endif /* AQ_CFG_FORCE_LEGACY_INT */
-
- /* net device init */
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
-
- err = aq_nic_cfg_start(self->port[port]);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_ndev_init(self->port[port]);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_ndev_register(self->port[port]);
- if (err < 0)
- goto err_exit;
- }
+ return 0;
err_exit:
- if (err < 0)
- aq_pci_func_deinit(self);
return err;
}
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
@@ -204,11 +158,10 @@ int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
irq_set_affinity_hint(pci_irq_vector(pdev, i),
affinity_mask);
}
-
return err;
}
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
unsigned int i = 0U;
@@ -224,12 +177,7 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
}
}
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
-{
- return self->mmio;
-}
-
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
if (self->pdev->msix_enabled)
return AQ_HW_IRQ_MSIX;
@@ -238,62 +186,159 @@ unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
return AQ_HW_IRQ_LEGACY;
}
-void aq_pci_func_deinit(struct aq_pci_func_s *self)
+static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
- if (!self)
- goto err_exit;
-
- aq_pci_func_free_irqs(self);
pci_free_irq_vectors(self->pdev);
-
- if (self->is_regions)
- pci_release_regions(self->pdev);
-
- if (self->is_pci_enabled)
- pci_disable_device(self->pdev);
-
-err_exit:;
}
-void aq_pci_func_free(struct aq_pci_func_s *self)
+static int aq_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
- unsigned int port = 0U;
+ struct aq_nic_s *self = NULL;
+ int err = 0;
+ struct net_device *ndev;
+ resource_size_t mmio_pa;
+ u32 bar;
+ u32 numvecs;
- if (!self)
- goto err_exit;
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
+ err = aq_pci_func_init(pdev);
+ if (err)
+ goto err_pci_func;
- aq_nic_ndev_free(self->port[port]);
+ ndev = aq_ndev_alloc();
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_ndev;
}
- if (self->mmio)
- iounmap(self->mmio);
+ self = netdev_priv(ndev);
+ self->pdev = pdev;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ pci_set_drvdata(pdev, self);
- kfree(self);
+ err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
+ &aq_nic_get_cfg(self)->aq_hw_caps);
+ if (err)
+ goto err_ioremap;
-err_exit:;
-}
+ self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+ self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
- pm_message_t *pm_msg)
-{
- int err = 0;
- unsigned int port = 0U;
+ for (bar = 0; bar < 4; ++bar) {
+ if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
+ resource_size_t reg_sz;
- if (!self) {
- err = -EFAULT;
- goto err_exit;
+ mmio_pa = pci_resource_start(pdev, bar);
+ if (mmio_pa == 0U) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ reg_sz = pci_resource_len(pdev, bar);
+ if (reg_sz <= 24 /* ATL_REGS_SIZE */) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+ if (!self->aq_hw->mmio) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ break;
+ }
}
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
- (void)aq_nic_change_pm_state(self->port[port], pm_msg);
+ if (bar == 4) {
+ err = -EIO;
+ goto err_ioremap;
}
-err_exit:
+ numvecs = min((u8)AQ_CFG_VECS_DEF,
+ aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
+ numvecs = min(numvecs, num_online_cpus());
+ /* enable interrupts */
+#if !AQ_CFG_FORCE_LEGACY_INT
+ err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
+ PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (err < 0)
+ goto err_hwinit;
+ }
+#endif
+
+ /* net device init */
+ aq_nic_cfg_start(self);
+
+ aq_nic_ndev_init(self);
+
+ err = aq_nic_ndev_register(self);
+ if (err < 0)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+err_hwinit:
+ iounmap(self->aq_hw->mmio);
+err_ioremap:
+ free_netdev(ndev);
+err_pci_func:
+ pci_release_regions(pdev);
+err_ndev:
+ pci_disable_device(pdev);
return err;
}
+
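
[editor's note] The new probe path uses the usual goto-ladder unwind, but the ladder as added is worth auditing: an `aq_ndev_alloc()` failure jumps to `err_ndev`, which only disables the device and leaks the PCI regions already requested by `aq_pci_func_init()`, while a func-init failure jumps to `err_pci_func` and releases regions that may never have been requested; the `kzalloc()` of `self->aq_hw` is also dereferenced without a NULL check. The general discipline — labels in strict reverse order of acquisition, each jump undoing exactly what succeeded — reduced to a sketch:

```c
#include <stdio.h>

static int step(const char *what, int fail)
{
	printf("acquire %s\n", what);
	return fail ? -1 : 0;
}

static int probe(int fail_at)
{
	int err;

	err = step("device", fail_at == 1);
	if (err)
		goto out;		/* nothing acquired yet */

	err = step("regions", fail_at == 2);
	if (err)
		goto err_device;	/* undo only the device */

	err = step("ndev", fail_at == 3);
	if (err)
		goto err_regions;	/* undo regions, then device */

	return 0;

	/* Labels run in strict reverse order of acquisition. */
err_regions:
	puts("release regions");
err_device:
	puts("release device");
out:
	return err;
}

int main(void)
{
	probe(3);	/* demonstrate the partial-failure unwind */
	return 0;
}
```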
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ if (self->ndev) {
+ if (self->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(self->ndev);
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+ iounmap(self->aq_hw->mmio);
+ kfree(self->aq_hw);
+ pci_release_regions(pdev);
+ free_netdev(self->ndev);
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+ pm_message_t pm_msg = PMSG_RESTORE;
+
+ return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+ .name = AQ_CFG_DRV_NAME,
+ .id_table = aq_pci_tbl,
+ .probe = aq_pci_probe,
+ .remove = aq_pci_remove,
+ .suspend = aq_pci_suspend,
+ .resume = aq_pci_resume,
+};
+
+module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index ecb033791203..aeee67bf69fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -13,22 +13,20 @@
#define AQ_PCI_FUNC_H
#include "aq_common.h"
+#include "aq_nic.h"
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops);
-int aq_pci_func_init(struct aq_pci_func_s *self);
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+struct aq_board_revision_s {
+ unsigned short devid;
+ unsigned short revision;
+ const struct aq_hw_ops *ops;
+ const struct aq_hw_caps_s *caps;
+};
+
+int aq_pci_func_init(struct pci_dev *pdev);
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec,
cpumask_t *affinity_mask);
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
-int aq_pci_func_start(struct aq_pci_func_s *self);
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
-void aq_pci_func_deinit(struct aq_pci_func_s *self);
-void aq_pci_func_free(struct aq_pci_func_s *self);
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
- pm_message_t *pm_msg);
+void aq_pci_func_free_irqs(struct aq_nic_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 519ca6534b85..0be6a11370bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_record_rx_queue(skb, self->idx);
- napi_gro_receive(napi, skb);
-
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
+
+ napi_gro_receive(napi, skb);
}
err_exit:
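
[editor's note] The aq_ring.c hunk above is a real bug fix, not churn: `napi_gro_receive()` hands the skb to the stack, which may free it, so reading `skb->len` afterwards was a use-after-free. The reorder accounts the stats first. The same read-before-handoff rule in miniature, with stand-in types:

```c
#include <stdio.h>
#include <stdlib.h>

struct sk_buff { unsigned int len; };

/* Stand-in for napi_gro_receive(): consumes (frees) the buffer. */
static void stack_receive(struct sk_buff *skb)
{
	free(skb);
}

int main(void)
{
	struct sk_buff *skb = malloc(sizeof(*skb));
	unsigned long rx_packets = 0, rx_bytes = 0;

	if (!skb)
		return 1;
	skb->len = 1514;

	/* Account first: after stack_receive() the skb is gone. */
	++rx_packets;
	rx_bytes += skb->len;

	stack_receive(skb);
	printf("%lu packets, %lu bytes\n", rx_packets, rx_bytes);
	return 0;
}
```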
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 5844078764bd..965fae0fb6e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -15,6 +15,7 @@
#include "aq_common.h"
struct page;
+struct aq_nic_cfg_s;
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
@@ -105,7 +106,6 @@ union aq_ring_stats_s {
};
struct aq_ring_s {
- struct aq_obj_s header;
struct aq_ring_buff_s *buff_ring;
u8 *dx_ring; /* descriptors ring, dma shared mem */
struct aq_nic_s *aq_nic;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index e12bcdfb874a..786ea8187c69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -14,12 +14,6 @@
#include "aq_common.h"
-#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
-
-struct aq_obj_s {
- atomic_t flags;
-};
-
static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
{
unsigned long flags_old, flags_new;
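
[editor's note] With `struct aq_obj_s` gone, the helpers take a bare `atomic_t *`. The body is cut off here, but the `flags_old`/`flags_new` locals telegraph the shape: a compare-and-swap retry loop. A C11 user-space equivalent of the set helper, as a sketch:

```c
#include <stdatomic.h>
#include <stdio.h>

/* CAS retry loop, mirroring aq_utils_obj_set(&flags, mask). */
static void obj_set(atomic_uint *flags, unsigned int mask)
{
	unsigned int old = atomic_load(flags);

	/* Retry until no other thread raced the read-modify-write;
	 * a failed CAS reloads 'old' with the current value. */
	while (!atomic_compare_exchange_weak(flags, &old, old | mask))
		;
}

static int obj_test(atomic_uint *flags, unsigned int mask)
{
	return (atomic_load(flags) & mask) != 0;
}

int main(void)
{
	atomic_uint flags = 0;

	obj_set(&flags, 1U << 2);
	printf("bit set: %d\n", obj_test(&flags, 1U << 2));	/* 1 */
	return 0;
}
```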
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 5fecc9a099ef..f890b8a5a862 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -19,8 +19,7 @@
#include <linux/netdevice.h>
struct aq_vec_s {
- struct aq_obj_s header;
- struct aq_hw_ops *aq_hw_ops;
+ const struct aq_hw_ops *aq_hw_ops;
struct aq_hw_s *aq_hw;
struct aq_nic_s *aq_nic;
unsigned int tx_rings;
@@ -166,7 +165,7 @@ err_exit:
return self;
}
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw)
{
struct aq_ring_s *ring = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 6c68b184236c..8bdf60bb3f63 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -19,6 +19,8 @@
struct aq_hw_s;
struct aq_hw_ops;
+struct aq_nic_s;
+struct aq_nic_cfg_s;
struct aq_ring_stats_rx_s;
struct aq_ring_stats_tx_s;
@@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f18dce14c93c..67e2f9fb9402 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -12,78 +12,100 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"
-static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- unsigned short device,
- unsigned short subsystem_device)
-{
- memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
-
- if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
- aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
-
- if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
- aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
- aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
- }
-
- return 0;
-}
-
-static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
-{
- struct hw_atl_s *self = NULL;
-
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self)
- goto err_exit;
-
- self->base.aq_pci_func = aq_pci_func;
+#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds = 248U, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds = 8U * 1024U, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
- self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_10G |
+ HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
-err_exit:
- return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
-static void hw_atl_a0_destroy(struct aq_hw_s *self)
-{
- kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
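
[editor's note] `DEFAULT_A0_BOARD_BASIC_CAPABILITIES` factors the fields shared by every A0 board into one macro; each `const` caps struct then supplies only its media type and speed mask via designated initializers. The same trick in isolation (field names and values are illustrative):

```c
#include <stdio.h>

struct caps {
	unsigned int msix_irqs;
	unsigned int mtu;
	unsigned int link_speed_msk;
	const char *media;
};

/* Common fields once; per-board structs add the rest. */
#define DEFAULT_CAPS \
	.msix_irqs = 4U, \
	.mtu = 9014U	/* illustrative jumbo value */

static const struct caps caps_fibre = {
	DEFAULT_CAPS,
	.media = "fibre",
	.link_speed_msk = 0x3eU,	/* illustrative mask */
};

static const struct caps caps_tp = {
	DEFAULT_CAPS,
	.media = "tp",
	.link_speed_msk = 0x3fU,
};

int main(void)
{
	printf("%s: irqs=%u mtu=%u\n", caps_tp.media,
	       caps_tp.msix_irqs, caps_tp.mtu);
	printf("%s: irqs=%u mtu=%u\n", caps_fibre.media,
	       caps_fibre.msix_irqs, caps_fibre.mtu);
	return 0;
}
```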
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
+ hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+ hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
+ hw_atl_glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
+ hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+ hw_atl_itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+ self->aq_fw_ops->set_state(self, MPI_RESET);
err = aq_hw_err_from_flags(self);
@@ -99,51 +121,53 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_A0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
@@ -151,20 +175,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -193,11 +216,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -212,35 +236,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg)
{
/* TX checksums offloads*/
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads*/
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -251,38 +275,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_A0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
@@ -301,10 +325,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
err = aq_hw_err_from_flags(self);
@@ -312,9 +336,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -325,20 +347,18 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
int err = 0;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
hw_atl_a0_hw_mac_addr_set(self, mac_addr);
- hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
- reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
- reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
hw_atl_a0_hw_qos_set(self);
hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
@@ -346,26 +366,25 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
/* Reset link status and read out initial hardware counters */
self->aq_link_status.mbps = 0;
- hw_atl_utils_update_stats(self);
+ self->aq_fw_ops->update_stats(self);
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
- [(aq_nic_cfg->vecs > 1U) ?
- 1 : 0]);
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
- ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
- ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
- ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+ ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+ ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
@@ -376,28 +395,28 @@ err_exit:
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -483,36 +502,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw,
+ aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -524,25 +544,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -563,7 +583,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -572,13 +592,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- ring->hw_head = hw_head_;
+ ring->hw_head = hw_head;
err = aq_hw_err_from_flags(self);
err_exit:
@@ -602,15 +622,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
if ((1U << 4) &
- reg_rx_dma_desc_status_get(self, ring->idx)) {
- rdm_rx_desc_en_set(self, false, ring->idx);
- rdm_rx_desc_res_set(self, true, ring->idx);
- rdm_rx_desc_res_set(self, false, ring->idx);
- rdm_rx_desc_en_set(self, true, ring->idx);
+ hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
+ hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
}
if (ring->hw_head ||
- (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+ (hw_atl_rdm_rx_desc_head_ptr_get(self,
+ ring->idx) < 2U)) {
break;
} else if (!(rxd_wb->status & 0x1U)) {
struct hw_atl_rxd_wb_s *rxd_wb1 =
@@ -693,26 +714,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- if ((1U << 16) & reg_gen_irq_status_get(self))
-
- atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+ if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@@ -723,18 +743,20 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2promiscuous_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled =
IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
- (i <= self->aq_nic_cfg->mc_list_count)) ?
- 1U : 0U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
return aq_hw_err_from_flags(self);
}
@@ -761,17 +783,19 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
- HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_A0_MAC_MIN + i);
}
err = aq_hw_err_from_flags(self);
@@ -823,7 +847,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
for (i = HW_ATL_A0_RINGS_MAX; i--;)
- reg_irq_thr_set(self, itr_rx, i);
+ hw_atl_reg_irq_thr_set(self, itr_rx, i);
return aq_hw_err_from_flags(self);
}
@@ -837,38 +861,19 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
- int err = 0;
-
- err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
- .create = hw_atl_a0_create,
- .destroy = hw_atl_a0_destroy,
- .get_hw_caps = hw_atl_a0_get_hw_caps,
-
- .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
- .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
- .hw_set_link_speed = hw_atl_a0_hw_set_speed,
.hw_init = hw_atl_a0_hw_init,
.hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
@@ -898,21 +903,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_a0_hw_rss_set,
.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
- .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
-{
- bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
- bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
- (pdev->device == HW_ATL_DEVICE_ID_D100) ||
- (pdev->device == HW_ATL_DEVICE_ID_D107) ||
- (pdev->device == HW_ATL_DEVICE_ID_D108) ||
- (pdev->device == HW_ATL_DEVICE_ID_D109));
-
- bool is_rev_ok = (pdev->revision == 1U);
-
- return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
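With hw_atl_a0_get_ops_by_id() removed, PCI-ID dispatch no longer lives in each
hardware layer; the layer now only exports const caps and ops tables, and the
common probe code picks the pair for a given device. A minimal sketch of such a
lookup, assuming a hypothetical aq_board_s descriptor (the real driver's
mapping structure may differ); the device IDs come from the removed code above:

	/* Hypothetical board descriptor tying a PCI device ID to caps/ops. */
	struct aq_board_s {
		u16 devid;
		const struct aq_hw_caps_s *caps;
		const struct aq_hw_ops *ops;
	};

	static const struct aq_board_s boards[] = {
		{ 0xD108, &hw_atl_a0_caps_aqc108, &hw_atl_ops_a0 },
		{ 0xD109, &hw_atl_a0_caps_aqc109, &hw_atl_ops_a0 },
	};

	static const struct aq_board_s *aq_board_lookup(u16 devid)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(boards); i++)
			if (boards[i].devid == devid)
				return &boards[i];
		return NULL;	/* unknown device */
	}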
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
index 6e1d527954c9..25fe954def03 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -16,19 +16,11 @@
#include "../aq_common.h"
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc109;
-#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
-#define HW_ATL_DEVICE_ID_0001 0x0001
-#define HW_ATL_DEVICE_ID_D100 0xD100
-#define HW_ATL_DEVICE_ID_D107 0xD107
-#define HW_ATL_DEVICE_ID_D108 0xD108
-#define HW_ATL_DEVICE_ID_D109 0xD109
-
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
-
-#endif
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+extern const struct aq_hw_ops hw_atl_ops_a0;
#endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 0592a0330cf0..1d8855558d74 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,69 +88,4 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
-/* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
- .ports = 1U,
- .is_64_dma = true,
- .msix_irqs = 4U,
- .irq_mask = ~0U,
- .vecs = HW_ATL_A0_RSS_MAX,
- .tcs = HW_ATL_A0_TC_MAX,
- .rxd_alignment = 1U,
- .rxd_size = HW_ATL_A0_RXD_SIZE,
- .rxds = 248U,
- .txd_alignment = 1U,
- .txd_size = HW_ATL_A0_TXD_SIZE,
- .txds = 8U * 1024U,
- .txhwb_alignment = 4096U,
- .tx_rings = HW_ATL_A0_TX_RINGS,
- .rx_rings = HW_ATL_A0_RX_RINGS,
- .hw_features = NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- NETIF_F_SG |
- NETIF_F_TSO,
- .hw_priv_flags = IFF_UNICAST_FLT,
- .link_speed_msk = (HW_ATL_A0_RATE_10G |
- HW_ATL_A0_RATE_5G |
- HW_ATL_A0_RATE_2G5 |
- HW_ATL_A0_RATE_1G |
- HW_ATL_A0_RATE_100M),
- .flow_control = true,
- .mtu = HW_ATL_A0_MTU_JUMBO,
- .mac_regs_count = 88,
- .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
-};
-
#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index e4a22ce7bf09..819f6bcf9b4e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -12,83 +12,89 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"
-static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- unsigned short device,
- unsigned short subsystem_device)
-{
- memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
-
- if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
- aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
-
- if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
- aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
- aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
- }
-
- return 0;
-}
-
-static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
-{
- struct hw_atl_s *self = NULL;
-
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self)
- goto err_exit;
-
- self->base.aq_pci_func = aq_pci_func;
+#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds = 4U * 1024U, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds = 8U * 1024U, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_LRO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
- self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
-err_exit:
- return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
-static void hw_atl_b0_destroy(struct aq_hw_s *self)
-{
- kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
-
- HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
-
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
- if (err < 0)
- goto err_exit;
-
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
-
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
- if (err < 0)
- goto err_exit;
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
- hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+ self->aq_fw_ops->set_state(self, MPI_RESET);
err = aq_hw_err_from_flags(self);
-err_exit:
return err;
}
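The DEFAULT_B0_BOARD_BASIC_CAPABILITIES macro above leans on C99 designated
initializers: it expands to the fields shared by every B0 board, and each board
definition then adds only what differs (media type, link-speed mask). The same
pattern in miniature, with a made-up struct for illustration:

	/* Fields common to all boards, reused through macro expansion. */
	#define EXAMPLE_COMMON_CAPS \
		.msix_irqs = 4U,    \
		.irq_mask = ~0U

	struct example_caps {
		unsigned int msix_irqs;
		unsigned int irq_mask;
		unsigned int link_speed_msk;
	};

	static const struct example_caps caps_fibre = {
		EXAMPLE_COMMON_CAPS,
		.link_speed_msk = 0x1FU,	/* board-specific part */
	};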
@@ -100,51 +106,53 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_B0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
@@ -152,20 +160,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -194,11 +201,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
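Both the RSS key and redirection-table writes above poll for completion with
AQ_HW_WAIT_FOR(condition, delay-in-us, retries) and then test a local err. The
macro's exact definition is in the driver's utils header, not in this patch; a
plausible sketch of the idiom it implements (the -ETIME value and udelay()
granularity here are assumptions):

	/* Poll "expr" up to "count" times, sleeping "delay_us" between tries;
	 * assumes a local "err" in scope, as the call sites above do. */
	#define EXAMPLE_WAIT_FOR(expr, delay_us, count)	\
	do {						\
		unsigned int tries_ = (count);		\
		err = 0;				\
		while (!(expr)) {			\
			udelay(delay_us);		\
			if (!--tries_) {		\
				err = -ETIME;		\
				break;			\
			}				\
		}					\
	} while (0)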
@@ -215,15 +223,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
unsigned int i;
/* TX checksums offloads */
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads */
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads */
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
/* LRO offloads */
{
@@ -232,43 +240,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
- rpo_lro_max_num_of_descriptors_set(self, val, i);
+ hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
- rpo_lro_time_base_divider_set(self, 0x61AU);
- rpo_lro_inactive_interval_set(self, 0);
- rpo_lro_max_coalescing_interval_set(self, 2);
+ hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+ hw_atl_rpo_lro_inactive_interval_set(self, 0);
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
- rpo_lro_qsessions_lim_set(self, 1U);
+ hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
- rpo_lro_total_desc_lim_set(self, 2U);
+ hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
- rpo_lro_patch_optimization_en_set(self, 0U);
+ hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
- rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+ hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
- rpo_lro_pkt_lim_set(self, 1U);
+ hw_atl_rpo_lro_pkt_lim_set(self, 1U);
- rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_rpo_lro_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -279,55 +288,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
if (cfg->vlan_id) {
- rpf_vlan_flr_act_set(self, 1U, 0U);
- rpf_vlan_id_flr_set(self, 0U, 0U);
- rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
+ hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
- rpf_vlan_accept_untagged_packets_set(self, 1U);
- rpf_vlan_untagged_act_set(self, 1U);
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
- rpf_vlan_flr_act_set(self, 1U, 1U);
- rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- rpf_vlan_flr_en_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
} else {
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
}
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00005040U,
IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
@@ -346,10 +355,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
err = aq_hw_err_from_flags(self);
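The unicast filter registers take the six MAC bytes split across two words: a
32-bit LSW holding bytes 2..5 (the l computed above) and an MSW holding bytes
0..1 in its low 16 bits (the h computation sits just outside this hunk but
follows the same packing, as the multicast path below shows). A standalone
illustration of that split:

	/* Pack a 6-byte MAC into the MSW/LSW register halves. */
	static void mac_to_regs(const u8 mac[6], u32 *msw, u32 *lsw)
	{
		*msw = ((u32)mac[0] << 8) | mac[1];
		*lsw = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		       ((u32)mac[4] << 8) | mac[5];
	}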
@@ -357,9 +366,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -371,51 +378,50 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
int err = 0;
u32 val;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_B0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
hw_atl_b0_hw_mac_addr_set(self, mac_addr);
- hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
hw_atl_b0_hw_qos_set(self);
hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
/* Force limit MRRS on RDM/TDM to 2K */
- val = aq_hw_read_reg(self, pci_reg_control6_adr);
- aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+ val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+ aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+ (val & ~0x707) | 0x404);
/* TX DMA total request limit. B0 hardware is not capable of
 * handling more than (8K-MRRS) bytes of incoming DMA data.
 * Value 24 is in 256-byte units.
 */
- aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+ aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
/* Reset link status and read out initial hardware counters */
self->aq_link_status.mbps = 0;
- hw_atl_utils_update_stats(self);
+ self->aq_fw_ops->update_stats(self);
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
[(aq_nic_cfg->vecs > 1U) ?
1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
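Replacing hw_atl_utils_mpi_set() with calls through self->aq_fw_ops decouples
the B0 hardware layer from any single firmware ABI: probe installs whichever
ops table matches the firmware on the NIC, and code like hw_init above calls
set_link_speed()/set_state() without knowing which generation it is talking to.
A sketch of the table's likely shape, inferred only from the members invoked in
this patch (the full member list is an assumption):

	/* Hypothetical firmware-ops table behind self->aq_fw_ops. */
	struct example_fw_ops {
		int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
		int (*set_state)(struct aq_hw_s *self, int state);
		int (*update_stats)(struct aq_hw_s *self);
	};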
@@ -427,28 +433,28 @@ err_exit:
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -534,36 +540,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -575,25 +581,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -614,7 +620,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -623,9 +629,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
@@ -728,22 +734,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
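The interrupt mask is carried as a u64, but the A0/B0 ITR registers are 32 bits
wide, so these helpers write only LODWORD(mask). The macro itself is not
defined in this patch; the usual definitions it stands in for look like this
(assumption, the driver's own spellings may differ):

	/* Plausible stand-ins; split a u64 into its 32-bit halves. */
	#define EXAMPLE_LODWORD(x) ((u32)((x) & 0xFFFFFFFFULL))
	#define EXAMPLE_HIDWORD(x) ((u32)(((x) >> 32) & 0xFFFFFFFFULL))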
@@ -754,20 +760,20 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2_accept_all_mc_packets_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI));
+ hw_atl_rpfl2_accept_all_mc_packets_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI));
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
@@ -796,16 +802,16 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_B0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
@@ -824,10 +830,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
switch (self->aq_nic_cfg->itr) {
case AQ_CFG_INTERRUPT_MODERATION_ON:
case AQ_CFG_INTERRUPT_MODERATION_AUTO:
- tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
- tdm_tdm_intr_moder_en_set(self, 1U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
- rdm_rdm_intr_moder_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
/* HW timers are in 2us units */
@@ -887,18 +893,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
break;
case AQ_CFG_INTERRUPT_MODERATION_OFF:
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
- tdm_tdm_intr_moder_en_set(self, 0U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
- rdm_rdm_intr_moder_en_set(self, 0U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
itr_tx = 0U;
itr_rx = 0U;
break;
}
for (i = HW_ATL_B0_RINGS_MAX; i--;) {
- reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
- reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
}
return aq_hw_err_from_flags(self);
@@ -913,38 +919,19 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
- int err = 0;
-
- err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
- .create = hw_atl_b0_create,
- .destroy = hw_atl_b0_destroy,
- .get_hw_caps = hw_atl_b0_get_hw_caps,
-
- .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
- .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
- .hw_set_link_speed = hw_atl_b0_hw_set_speed,
.hw_init = hw_atl_b0_hw_init,
.hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
@@ -974,21 +961,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
- .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
-{
- bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
- bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
- (pdev->device == HW_ATL_DEVICE_ID_D100) ||
- (pdev->device == HW_ATL_DEVICE_ID_D107) ||
- (pdev->device == HW_ATL_DEVICE_ID_D108) ||
- (pdev->device == HW_ATL_DEVICE_ID_D109));
-
- bool is_rev_ok = (pdev->revision == 2U);
-
- return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index a1e1bce6c1f3..2cc8dacfdc27 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -16,19 +16,27 @@
#include "../aq_common.h"
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
-#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
-#define HW_ATL_DEVICE_ID_0001 0x0001
-#define HW_ATL_DEVICE_ID_D100 0xD100
-#define HW_ATL_DEVICE_ID_D107 0xD107
-#define HW_ATL_DEVICE_ID_D108 0xD108
-#define HW_ATL_DEVICE_ID_D109 0xD109
+#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
+#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
+#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
-#endif
+#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109
+
+extern const struct aq_hw_ops hw_atl_ops_b0;
+
+#define hw_atl_ops_b1 hw_atl_ops_b0
#endif /* HW_ATL_B0_H */
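The hw_atl_b0_caps_aqc1xx defines above are plain token aliases, so every
marketing SKU gets a name while only four caps objects actually exist. Since
the preprocessor rewrites the alias before compilation, both pointers below
refer to the same object:

	const struct aq_hw_caps_s *a = &hw_atl_b0_caps_aqc111;	/* alias */
	const struct aq_hw_caps_s *b = &hw_atl_b0_caps_aqc108;	/* target */
	/* a == b: aqc111 is #defined to aqc108 in hw_atl_b0.h. */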
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 9aa2c6edfca2..405d1455c222 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,70 +142,6 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
/* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
- .ports = 1U,
- .is_64_dma = true,
- .msix_irqs = 4U,
- .irq_mask = ~0U,
- .vecs = HW_ATL_B0_RSS_MAX,
- .tcs = HW_ATL_B0_TC_MAX,
- .rxd_alignment = 1U,
- .rxd_size = HW_ATL_B0_RXD_SIZE,
- .rxds = 8U * 1024U,
- .txd_alignment = 1U,
- .txd_size = HW_ATL_B0_TXD_SIZE,
- .txds = 8U * 1024U,
- .txhwb_alignment = 4096U,
- .tx_rings = HW_ATL_B0_TX_RINGS,
- .rx_rings = HW_ATL_B0_RX_RINGS,
- .hw_features = NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_LRO,
- .hw_priv_flags = IFF_UNICAST_FLT,
- .link_speed_msk = (HW_ATL_B0_RATE_10G |
- HW_ATL_B0_RATE_5G |
- HW_ATL_B0_RATE_2G5 |
- HW_ATL_B0_RATE_1G |
- HW_ATL_B0_RATE_100M),
- .flow_control = true,
- .mtu = HW_ATL_B0_MTU_JUMBO,
- .mac_regs_count = 88,
- .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
-};
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 3de651afa8c7..10ba035dadb1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -16,111 +16,115 @@
#include "../aq_hw_utils.h"
/* global */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore)
{
- aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
}
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
{
- return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
}
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
- glb_reg_res_dis_msk,
- glb_reg_res_dis_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+ HW_ATL_GLB_REG_RES_DIS_MSK,
+ HW_ATL_GLB_REG_RES_DIS_SHIFT,
glb_reg_res_dis);
}
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
{
- aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
- glb_soft_res_shift, soft_res);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
}
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
- glb_soft_res_msk,
- glb_soft_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT);
}
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
}
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
}
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
}
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
}
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
}
/* interrupt */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw)
{
- aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
}
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx)
{
/* register address for bitfield imr_rx{r}_en */
static u32 itr_imr_rxren_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}_en */
@@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
irq_map_en_rx);
}
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx)
{
/* register address for bitfield imr_tx{t}_en */
static u32 itr_imr_txten_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}_en */
@@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
irq_map_en_tx);
}
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
{
/* register address for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_msk[32] = {
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
};
/* lower bit position of bitfield imr_rx{r}[4:0] */
@@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
irq_map_rx);
}
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
{
/* register address for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_msk[32] = {
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
};
/* lower bit position of bitfield imr_tx{t}[4:0] */
@@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
irq_map_tx);
}
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
}
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
{
- aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
}
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
- itr_reg_res_dsbl_msk,
- itr_reg_res_dsbl_shift, irq_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+ HW_ATL_ITR_REG_RES_DSBL_MSK,
+ HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
}
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw)
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
}
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
}
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT);
}
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
{
- aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift, res_irq);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT, res_irq);
}
/* rdm */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
- rdm_dcadcpuid_msk,
- rdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+ HW_ATL_RDM_DCADCPUID_MSK,
+ HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
}
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
- rdm_dca_en_shift, rx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+ HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
}
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
- rdm_dca_mode_shift, rx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+ HW_ATL_RDM_DCA_MODE_MSK,
+ HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
}
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
- rdm_descddata_size_msk,
- rdm_descddata_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+ HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
rx_desc_data_buff_size);
}
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
- rdm_dcaddesc_en_msk,
- rdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_RDM_DCADDESC_EN_MSK,
+ HW_ATL_RDM_DCADDESC_EN_SHIFT,
rx_desc_dca_en);
}
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
- rdm_descden_msk,
- rdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDEN_MSK,
+ HW_ATL_RDM_DESCDEN_SHIFT,
rx_desc_en);
}
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
- rdm_descdhdr_size_msk,
- rdm_descdhdr_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+ HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
rx_desc_head_buff_size);
}
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
- rdm_descdhdr_split_msk,
- rdm_descdhdr_split_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+ HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
rx_desc_head_splitting);
}
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
- rdm_descdhd_msk, rdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+ HW_ATL_RDM_DESCDHD_MSK,
+ HW_ATL_RDM_DESCDHD_SHIFT);
}
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
- rdm_descdlen_msk, rdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
rx_desc_len);
}
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
- rdm_descdreset_msk, rdm_descdreset_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+ HW_ATL_RDM_DESCDRESET_MSK,
+ HW_ATL_RDM_DESCDRESET_SHIFT,
rx_desc_res);
}
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en)
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
- rdm_int_desc_wrb_en_msk,
- rdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
rx_desc_wr_wb_irq_en);
}
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
- rdm_dcadhdr_en_msk,
- rdm_dcadhdr_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+ HW_ATL_RDM_DCADHDR_EN_MSK,
+ HW_ATL_RDM_DCADHDR_EN_SHIFT,
rx_head_dca_en);
}
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
- rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+ HW_ATL_RDM_DCADPAY_EN_MSK,
+ HW_ATL_RDM_DCADPAY_EN_SHIFT,
rx_pld_dca_en);
}
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
- rdm_int_rim_en_msk,
- rdm_int_rim_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+ HW_ATL_RDM_INT_RIM_EN_MSK,
+ HW_ATL_RDM_INT_RIM_EN_SHIFT,
rdm_intr_moder_en);
}
/* reg */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx)
{
- aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+ aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
}
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
}
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
{
- aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
}
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
{
- aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
}
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
rx_dma_desc_base_addrlsw);
}
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
rx_dma_desc_base_addrmsw);
}
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
}
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
rx_dma_desc_tail_ptr);
}
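/* Editorial aside: a descriptor ring base is a 64-bit DMA address
 * programmed as two 32-bit halves via the lsw/msw setters above. A hedged
 * usage sketch (hw_atl_rx_ring_base_sketch is hypothetical;
 * lower_32_bits()/upper_32_bits() are the standard kernel helpers):
 */
static void hw_atl_rx_ring_base_sketch(struct aq_hw_s *aq_hw,
				       dma_addr_t base, u32 ring)
{
	hw_atl_reg_rx_dma_desc_base_addresslswset(aq_hw,
						  lower_32_bits(base), ring);
	hw_atl_reg_rx_dma_desc_base_addressmswset(aq_hw,
						  upper_32_bits(base), ring);
}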
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+ rx_flr_mcst_flr_msk);
}
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter)
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+ rx_flr_mcst_flr);
}
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1)
{
- aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+ rx_flr_rss_control1);
}
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+ u32 rx_filter_control2)
{
- aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
}
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
rx_intr_moderation_ctl);
}
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl)
{
- aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
}
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
tx_dma_desc_base_addrlsw);
}
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
tx_dma_desc_base_addrmsw);
}
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
tx_dma_desc_tail_ptr);
}
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
tx_intr_moderation_ctl);
}
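/* Editorial aside: rx and tx interrupt moderation are controlled per
 * queue. A hedged sketch applying one control word to all queues
 * (max_queues and the encoding of ctl are assumptions):
 */
static void hw_atl_itr_moder_sketch(struct aq_hw_s *aq_hw, u32 ctl,
				    u32 max_queues)
{
	u32 q;

	for (q = 0U; q < max_queues; q++) {
		hw_atl_reg_rx_intr_moder_ctrl_set(aq_hw, ctl, q);
		hw_atl_reg_tx_intr_moder_ctrl_set(aq_hw, ctl, q);
	}
}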
/* RPB: rx packet buffer */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
- rpb_dma_sys_lbk_msk,
- rpb_dma_sys_lbk_shift, dma_sys_lbk);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+ HW_ATL_RPB_DMA_SYS_LBK_MSK,
+ HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode)
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
- rpb_rpf_rx_tc_mode_msk,
- rpb_rpf_rx_tc_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
rx_traf_class_mode);
}
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
- rpb_rx_buf_en_shift, rx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+ HW_ATL_RPB_RX_BUF_EN_MSK,
+ HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
}
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
- rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBHI_THRESH_MSK,
+ HW_ATL_RPB_RXBHI_THRESH_SHIFT,
rx_buff_hi_threshold_per_tc);
}
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
- rpb_rxblo_thresh_msk,
- rpb_rxblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBLO_THRESH_MSK,
+ HW_ATL_RPB_RXBLO_THRESH_SHIFT,
rx_buff_lo_threshold_per_tc);
}
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
- rpb_rx_fc_mode_msk,
- rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+ HW_ATL_RPB_RX_FC_MODE_MSK,
+ HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
- rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+ HW_ATL_RPB_RXBBUF_SIZE_MSK,
+ HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
rx_pkt_buff_size_per_tc);
}
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
- rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+ HW_ATL_RPB_RXBXOFF_EN_MSK,
+ HW_ATL_RPB_RXBXOFF_EN_SHIFT,
rx_xoff_en_per_tc);
}
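/* Editorial aside: the rx packet buffer is carved up per traffic class,
 * with xoff flow control asserted between a low and a high watermark. A
 * hedged single-TC sketch (all sizes and thresholds are illustrative,
 * not recommended values):
 */
static void hw_atl_rpb_tc0_sketch(struct aq_hw_s *aq_hw)
{
	u32 buff_size = 320U;	/* illustrative per-TC buffer size */

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(aq_hw, buff_size, 0U);
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(aq_hw,
						   buff_size * 32U / 100U, 0U);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(aq_hw,
						   buff_size * 66U / 100U, 0U);
	hw_atl_rpb_rx_xoff_en_per_tc_set(aq_hw, 1U, 0U);
	hw_atl_rpb_rx_buff_en_set(aq_hw, 1U);
}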
/* rpf */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold)
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
- rpfl2bc_thresh_msk,
- rpfl2bc_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+ HW_ATL_RPFL2BC_THRESH_MSK,
+ HW_ATL_RPFL2BC_THRESH_SHIFT,
l2broadcast_count_threshold);
}
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
- rpfl2bc_en_shift, l2broadcast_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+ HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
}
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
- rpfl2bc_act_shift, l2broadcast_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+ HW_ATL_RPFL2BC_ACT_MSK,
+ HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
}
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
- rpfl2mc_enf_msk,
- rpfl2mc_enf_shift, l2multicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+ HW_ATL_RPFL2MC_ENF_MSK,
+ HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en)
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
- rpfl2promis_mode_msk,
- rpfl2promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT,
l2promiscuous_mode_en);
}
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter)
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
- rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+ HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
l2unicast_flr_act);
}
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
- rpfl2uc_enf_msk,
- rpfl2uc_enf_shift, l2unicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+ HW_ATL_RPFL2UC_ENF_MSK,
+ HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
}
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+ aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
l2unicast_dest_addresslsw);
}
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
- rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+ HW_ATL_RPFL2UC_DAFMSW_MSK,
+ HW_ATL_RPFL2UC_DAFMSW_SHIFT,
l2unicast_dest_addressmsw);
}
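/* Editorial aside: a unicast filter takes the 6-byte MAC split across
 * the lsw (lower four bytes) and msw (upper two bytes) registers. A
 * hedged sketch; the exact byte ordering is an assumption:
 */
static void hw_atl_uc_flr_sketch(struct aq_hw_s *aq_hw, const u8 *mac,
				 u32 filter)
{
	u32 lsw = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		  ((u32)mac[4] << 8) | mac[5];
	u32 msw = ((u32)mac[0] << 8) | mac[1];

	hw_atl_rpfl2_uc_flr_en_set(aq_hw, 0U, filter);	/* quiesce first */
	hw_atl_rpfl2unicast_dest_addresslsw_set(aq_hw, lsw, filter);
	hw_atl_rpfl2unicast_dest_addressmsw_set(aq_hw, msw, filter);
	hw_atl_rpfl2_uc_flr_en_set(aq_hw, 1U, filter);
}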
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets)
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
- rpfl2mc_accept_all_msk,
- rpfl2mc_accept_all_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
l2_accept_all_mc_packets);
}
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc)
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
};
/* bitmask for bitfield rx_tc_up{t}[2:0] */
@@ -711,273 +750,290 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
user_priority_tc_map);
}
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
- rpf_rss_key_addr_msk,
- rpf_rss_key_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+ HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+ HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
rss_key_addr);
}
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
{
- aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
rss_key_wr_data);
}
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
}
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
rss_key_wr_en);
}
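/* Editorial aside: the RSS key is loaded word by word through a
 * data/address/enable handshake, the _wr_en bit doubling as a busy flag.
 * A hedged sketch of one word write (the polling bounds and the
 * enable-clears-when-done behaviour are assumptions):
 */
static int hw_atl_rss_key_word_sketch(struct aq_hw_s *aq_hw, u32 addr,
				      u32 data)
{
	int budget = 1000;

	hw_atl_rpf_rss_key_wr_data_set(aq_hw, data);
	hw_atl_rpf_rss_key_addr_set(aq_hw, addr);
	hw_atl_rpf_rss_key_wr_en_set(aq_hw, 1U);
	while (hw_atl_rpf_rss_key_wr_en_get(aq_hw) && --budget)
		udelay(1);	/* wait for hardware to latch the word */
	return budget ? 0 : -ETIMEDOUT;
}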
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
- rpf_rss_redir_addr_msk,
- rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+ HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+ HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+ rss_redir_tbl_addr);
}
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data)
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
- rpf_rss_redir_wr_data_msk,
- rpf_rss_redir_wr_data_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
rss_redir_tbl_wr_data);
}
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
}
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
}
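/* Editorial aside: the RSS redirection table is written with the same
 * data/address/enable handshake as the key above. A hedged sketch for
 * one table entry (polling bounds are assumptions):
 */
static int hw_atl_rss_redir_entry_sketch(struct aq_hw_s *aq_hw, u32 addr,
					 u32 data)
{
	int budget = 1000;

	hw_atl_rpf_rss_redir_tbl_wr_data_set(aq_hw, data);
	hw_atl_rpf_rss_redir_tbl_addr_set(aq_hw, addr);
	hw_atl_rpf_rss_redir_wr_en_set(aq_hw, 1U);
	while (hw_atl_rpf_rss_redir_wr_en_get(aq_hw) && --budget)
		udelay(1);
	return budget ? 0 : -ETIMEDOUT;
}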
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
- rpf_tpo_rpf_sys_lbk_msk,
- rpf_tpo_rpf_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
tpo_to_rpf_sys_lbk);
}
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
- rpf_vl_inner_tpid_msk,
- rpf_vl_inner_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+ HW_ATL_RPF_VL_INNER_TPID_MSK,
+ HW_ATL_RPF_VL_INNER_TPID_SHIFT,
vlan_inner_etht);
}
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
- rpf_vl_outer_tpid_msk,
- rpf_vl_outer_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+ HW_ATL_RPF_VL_OUTER_TPID_MSK,
+ HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
vlan_outer_etht);
}
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
- rpf_vl_promis_mode_msk,
- rpf_vl_promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
vlan_prom_mode_en);
}
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets)
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
- rpf_vl_accept_untagged_mode_msk,
- rpf_vl_accept_untagged_mode_shift,
- vlan_accept_untagged_packets);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+ vlan_acc_untagged_packets);
}
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
- rpf_vl_untagged_act_msk,
- rpf_vl_untagged_act_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
vlan_untagged_act);
}
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
- rpf_vl_en_f_msk,
- rpf_vl_en_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_EN_F_MSK,
+ HW_ATL_RPF_VL_EN_F_SHIFT,
vlan_flr_en);
}
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
- rpf_vl_act_f_msk,
- rpf_vl_act_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+ HW_ATL_RPF_VL_ACT_F_MSK,
+ HW_ATL_RPF_VL_ACT_F_SHIFT,
vlan_flr_act);
}
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
- rpf_vl_id_f_msk,
- rpf_vl_id_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+ HW_ATL_RPF_VL_ID_F_MSK,
+ HW_ATL_RPF_VL_ID_F_SHIFT,
vlan_id_flr);
}
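/* Editorial aside: a VLAN filter is an id/action/enable triple. A hedged
 * sketch that admits one VLAN id (the action encoding, here 1U for
 * "accept to host", is an assumption):
 */
static void hw_atl_vlan_flr_sketch(struct aq_hw_s *aq_hw, u32 vlan_id,
				   u32 filter)
{
	hw_atl_rpf_vlan_flr_en_set(aq_hw, 0U, filter);	/* off while we
							 * reprogram */
	hw_atl_rpf_vlan_id_flr_set(aq_hw, vlan_id, filter);
	hw_atl_rpf_vlan_flr_act_set(aq_hw, 1U, filter);	/* assumed accept */
	hw_atl_rpf_vlan_flr_en_set(aq_hw, 1U, filter);
}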
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
- rpf_et_enf_msk,
- rpf_et_enf_shift, etht_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+ HW_ATL_RPF_ET_ENF_MSK,
+ HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
}
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter)
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
- rpf_et_upfen_msk, rpf_et_upfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+ HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
etht_user_priority_en);
}
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
- rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+ HW_ATL_RPF_ET_RXQFEN_MSK,
+ HW_ATL_RPF_ET_RXQFEN_SHIFT,
etht_rx_queue_en);
}
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter)
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
- rpf_et_upf_msk,
- rpf_et_upf_shift, etht_user_priority);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+ HW_ATL_RPF_ET_UPF_MSK,
+ HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
}
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
- rpf_et_rxqf_msk,
- rpf_et_rxqf_shift, etht_rx_queue);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_RXQF_MSK,
+ HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
}
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter)
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
- rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_MNG_RXQF_MSK,
+ HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
etht_mgt_queue);
}
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
- rpf_et_actf_msk,
- rpf_et_actf_shift, etht_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+ HW_ATL_RPF_ET_ACTF_MSK,
+ HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
}
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
- rpf_et_valf_msk,
- rpf_et_valf_shift, etht_flr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+ HW_ATL_RPF_ET_VALF_MSK,
+ HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
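/* Editorial aside: an ethertype filter pairs a 16-bit ethertype value
 * with optional rx-queue steering. A hedged programming-order sketch
 * (the action encoding and the example ethertype are illustrative):
 */
static void hw_atl_etht_flr_sketch(struct aq_hw_s *aq_hw, u32 etht,
				   u32 queue, u32 filter)
{
	hw_atl_rpf_etht_flr_en_set(aq_hw, 0U, filter);
	hw_atl_rpf_etht_flr_set(aq_hw, etht, filter);	/* e.g. 0x88F7U */
	hw_atl_rpf_etht_rx_queue_set(aq_hw, queue, filter);
	hw_atl_rpf_etht_rx_queue_en_set(aq_hw, 1U, filter);
	hw_atl_rpf_etht_flr_act_set(aq_hw, 1U, filter);	/* assumed accept */
	hw_atl_rpf_etht_flr_en_set(aq_hw, 1U, filter);
}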
/* RPO: rx packet offload */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
- rpo_ipv4chk_en_msk,
- rpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+ HW_ATL_RPO_IPV4CHK_EN_MSK,
+ HW_ATL_RPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping, u32 descriptor)
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
- rpo_descdvl_strip_msk,
- rpo_descdvl_strip_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+ HW_ATL_RPO_DESCDVL_STRIP_MSK,
+ HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
rx_desc_vlan_stripping);
}
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
- rpol4chk_en_shift, tcp_udp_crc_offload_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+ HW_ATL_RPOL4CHK_EN_MSK,
+ HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
}
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
{
- aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
}
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en)
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
- rpo_lro_ptopt_en_msk,
- rpo_lro_ptopt_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+ HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+ HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
lro_patch_optimization_en);
}
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
- u32 lro_qsessions_lim)
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
- rpo_lro_qses_lmt_msk,
- rpo_lro_qses_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+ HW_ATL_RPO_LRO_QSES_LMT_MSK,
+ HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
lro_qsessions_lim);
}
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
- rpo_lro_tot_dsc_lmt_msk,
- rpo_lro_tot_dsc_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
lro_total_desc_lim);
}
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt)
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
- rpo_lro_pkt_min_msk,
- rpo_lro_pkt_min_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+ HW_ATL_RPO_LRO_PKT_MIN_MSK,
+ HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
lro_min_pld_of_first_pkt);
}
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
{
- aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
}
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_number_of_descriptors,
- u32 lro)
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
{
/* Register address for bitfield lro{L}_des_max[1:0] */
static u32 rpo_lro_ldes_max_adr[32] = {
@@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
lro_max_number_of_descriptors);
}
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider)
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
- rpo_lro_tb_div_msk,
- rpo_lro_tb_div_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+ HW_ATL_RPO_LRO_TB_DIV_MSK,
+ HW_ATL_RPO_LRO_TB_DIV_SHIFT,
lro_time_base_divider);
}
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval)
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
- rpo_lro_ina_ival_msk,
- rpo_lro_ina_ival_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+ HW_ATL_RPO_LRO_INA_IVAL_MSK,
+ HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
lro_inactive_interval);
}
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval)
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
- rpo_lro_max_ival_msk,
- rpo_lro_max_ival_shift,
- lro_max_coalescing_interval);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+ HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+ HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+ lro_max_coal_interval);
}
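/* Editorial aside: LRO bring-up strings several limits together before
 * the final enable register is written. A hedged ordering sketch (every
 * numeric value is illustrative, and the per-queue-bitmask reading of
 * lro_en is an assumption):
 */
static void hw_atl_lro_sketch(struct aq_hw_s *aq_hw)
{
	hw_atl_rpo_lro_time_base_divider_set(aq_hw, 0x61AU);
	hw_atl_rpo_lro_inactive_interval_set(aq_hw, 0U);
	hw_atl_rpo_lro_max_coalescing_interval_set(aq_hw, 2U);
	hw_atl_rpo_lro_qsessions_lim_set(aq_hw, 1U);
	hw_atl_rpo_lro_total_desc_lim_set(aq_hw, 2U);
	hw_atl_rpo_lro_min_pay_of_first_pkt_set(aq_hw, 10U);
	hw_atl_rpo_lro_pkt_lim_set(aq_hw, 1U);
	hw_atl_rpo_lro_en_set(aq_hw, 0xFFFFFFFFU);	/* assumed bitmask */
}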
/* rx */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
- rx_reg_res_dsbl_msk,
- rx_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+ HW_ATL_RX_REG_RES_DSBL_MSK,
+ HW_ATL_RX_REG_RES_DSBL_SHIFT,
rx_reg_res_dis);
}
/* tdm */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
- tdm_dcadcpuid_msk,
- tdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+ HW_ATL_TDM_DCADCPUID_MSK,
+ HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
}
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en)
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
{
- aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
}
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
- tdm_dca_en_shift, tx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+ HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
}
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
- tdm_dca_mode_shift, tx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+ HW_ATL_TDM_DCA_MODE_MSK,
+ HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
}
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
- tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_TDM_DCADDESC_EN_MSK,
+ HW_ATL_TDM_DCADDESC_EN_SHIFT,
tx_desc_dca_en);
}
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
- tdm_descden_msk,
- tdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDEN_MSK,
+ HW_ATL_TDM_DESCDEN_SHIFT,
tx_desc_en);
}
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
- tdm_descdhd_msk, tdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+ HW_ATL_TDM_DESCDHD_MSK,
+ HW_ATL_TDM_DESCDHD_SHIFT);
}
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
- tdm_descdlen_msk,
- tdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDLEN_MSK,
+ HW_ATL_TDM_DESCDLEN_SHIFT,
tx_desc_len);
}
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en)
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
- tdm_int_desc_wrb_en_msk,
- tdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
tx_desc_wr_wb_irq_en);
}
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
- tdm_descdwrb_thresh_msk,
- tdm_descdwrb_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+ HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+ HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
tx_desc_wr_wb_threshold);
}
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en)
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
- tdm_int_mod_en_msk,
- tdm_int_mod_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+ HW_ATL_TDM_INT_MOD_EN_MSK,
+ HW_ATL_TDM_INT_MOD_EN_SHIFT,
tdm_irq_moderation_en);
}
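/* Editorial aside: tx ring bring-up sets the ring length, write-back
 * threshold and enable; the head-pointer getter above then serves
 * completion polling. A hedged sketch (the /8 length granularity is an
 * assumption):
 */
static void hw_atl_tx_ring_sketch(struct aq_hw_s *aq_hw, u32 ring,
				  u32 ring_size)
{
	hw_atl_tdm_tx_desc_len_set(aq_hw, ring_size / 8U, ring);
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(aq_hw, 0U, ring);
	hw_atl_tdm_tx_desc_en_set(aq_hw, 1U, ring);
}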
/* thm */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt)
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
- thm_lso_tcp_flag_first_msk,
- thm_lso_tcp_flag_first_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
lso_tcp_flag_of_first_pkt);
}
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt)
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
- thm_lso_tcp_flag_last_msk,
- thm_lso_tcp_flag_last_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
lso_tcp_flag_of_last_pkt);
}
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt)
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
- thm_lso_tcp_flag_mid_msk,
- thm_lso_tcp_flag_mid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
lso_tcp_flag_of_middle_pkt);
}
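/* Editorial aside: LSO treats TCP flags differently by segment position
 * (FIN/PSH belong only on the last segment). A hedged sketch using the
 * standard TCP header flag bits; whether these registers take a mask or
 * a literal flag word is an assumption:
 */
static void hw_atl_lso_flags_sketch(struct aq_hw_s *aq_hw)
{
	u32 fin = 0x0001U, psh = 0x0008U, ack = 0x0010U;

	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(aq_hw, ack);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(aq_hw, ack);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(aq_hw, fin | psh | ack);
}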
/* TPB: tx packet buffer */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
- tpb_tx_buf_en_shift, tx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+ HW_ATL_TPB_TX_BUF_EN_MSK,
+ HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
- tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBHI_THRESH_MSK,
+ HW_ATL_TPB_TXBHI_THRESH_SHIFT,
tx_buff_hi_threshold_per_tc);
}
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
- tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBLO_THRESH_MSK,
+ HW_ATL_TPB_TXBLO_THRESH_SHIFT,
tx_buff_lo_threshold_per_tc);
}
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
- tpb_dma_sys_lbk_msk,
- tpb_dma_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+ HW_ATL_TPB_DMA_SYS_LBK_MSK,
+ HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
tx_dma_sys_lbk_en);
}
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
- tpb_txbbuf_size_msk,
- tpb_txbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+ HW_ATL_TPB_TXBBUF_SIZE_MSK,
+ HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
tx_pkt_buff_size_per_tc);
}
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
- tpb_tx_scp_ins_en_msk,
- tpb_tx_scp_ins_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+ HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+ HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
tx_path_scp_ins_en);
}
/* TPO: tx packet offload */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
- tpo_ipv4chk_en_msk,
- tpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+ HW_ATL_TPO_IPV4CHK_EN_MSK,
+ HW_ATL_TPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
- tpol4chk_en_msk,
- tpol4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+ HW_ATL_TPOL4CHK_EN_MSK,
+ HW_ATL_TPOL4CHK_EN_SHIFT,
tcp_udp_crc_offload_en);
}
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
- tpo_pkt_sys_lbk_msk,
- tpo_pkt_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+ HW_ATL_TPO_PKT_SYS_LBK_MSK,
+ HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
tx_pkt_sys_lbk_en);
}
/* TPS: tx packet scheduler */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode)
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
- tps_data_tc_arb_mode_msk,
- tps_data_tc_arb_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
tx_pkt_shed_data_arb_mode);
}
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res)
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
- tps_desc_rate_ta_rst_msk,
- tps_desc_rate_ta_rst_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+ HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+ HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
curr_time_res);
}
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim)
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
- tps_desc_rate_lim_msk,
- tps_desc_rate_lim_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+ HW_ATL_TPS_DESC_RATE_LIM_MSK,
+ HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
tx_pkt_shed_desc_rate_lim);
}
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
- tps_desc_tc_arb_mode_msk,
- tps_desc_tc_arb_mode_shift,
- tx_pkt_shed_desc_tc_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
- tps_desc_tctcredit_max_msk,
- tps_desc_tctcredit_max_shift,
- tx_pkt_shed_desc_tc_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
- tps_desc_tctweight_msk,
- tps_desc_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
tx_pkt_shed_desc_tc_weight);
}
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
- tps_desc_vm_arb_mode_msk,
- tps_desc_vm_arb_mode_shift,
- tx_pkt_shed_desc_vm_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
- tps_data_tctcredit_max_msk,
- tps_data_tctcredit_max_shift,
- tx_pkt_shed_tc_data_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
- tps_data_tctweight_msk,
- tps_data_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
}
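/* Editorial aside: the scheduler pairs an arbitration mode with per-TC
 * credit ceilings and weights at both the data and descriptor levels. A
 * hedged sketch (the mode encoding and all numeric values are
 * illustrative):
 */
static void hw_atl_tps_tc_sketch(struct aq_hw_s *aq_hw, u32 tc)
{
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(aq_hw, 0U);
	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(aq_hw, 0xFFFU, tc);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(aq_hw, 0x64U, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(aq_hw, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(aq_hw, 0x50U, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(aq_hw, 0x1EU, tc);
}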
/* tx */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
- tx_reg_res_dsbl_msk,
- tx_reg_res_dsbl_shift, tx_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+ HW_ATL_TX_REG_RES_DSBL_MSK,
+ HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
}
/* msm */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
- msm_reg_access_busy_msk,
- msm_reg_access_busy_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+ HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+ HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
}
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr)
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
- msm_reg_addr_msk,
- msm_reg_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+ HW_ATL_MSM_REG_ADDR_MSK,
+ HW_ATL_MSM_REG_ADDR_SHIFT,
reg_addr_for_indirect_addr);
}
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
- msm_reg_rd_strobe_msk,
- msm_reg_rd_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+ HW_ATL_MSM_REG_RD_STROBE_MSK,
+ HW_ATL_MSM_REG_RD_STROBE_SHIFT,
reg_rd_strobe);
}
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
}
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
{
- aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+ aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
}
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
- msm_reg_wr_strobe_msk,
- msm_reg_wr_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+ HW_ATL_MSM_REG_WR_STROBE_MSK,
+ HW_ATL_MSM_REG_WR_STROBE_SHIFT,
reg_wr_strobe);
}
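/* Editorial aside: the msm block is reached indirectly -- program the
 * address, pulse the read strobe, then fetch the data, all gated by a
 * busy flag. A hedged read sketch (poll bounds and delays are
 * assumptions):
 */
static int hw_atl_msm_read_sketch(struct aq_hw_s *aq_hw, u32 addr, u32 *val)
{
	int budget = 10;

	while (hw_atl_msm_reg_access_status_get(aq_hw) && --budget)
		udelay(10);	/* wait until the block is idle */
	if (!budget)
		return -ETIMEDOUT;
	hw_atl_msm_reg_addr_for_indirect_addr_set(aq_hw, addr);
	hw_atl_msm_reg_rd_strobe_set(aq_hw, 1U);
	budget = 10;
	while (hw_atl_msm_reg_access_status_get(aq_hw) && --budget)
		udelay(10);	/* wait for the read to complete */
	if (!budget)
		return -ETIMEDOUT;
	*val = hw_atl_msm_reg_rd_data_get(aq_hw);
	return 0;
}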
/* pci */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
- pci_reg_res_dsbl_msk,
- pci_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+ HW_ATL_PCI_REG_RES_DSBL_MSK,
+ HW_ATL_PCI_REG_RES_DSBL_SHIFT,
pci_reg_res_dis);
}
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
- u32 scratch_scp)
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
{
- aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
glb_cpu_scratch_scp);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index ed1085b95adb..dfb426f2dc2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -21,657 +21,681 @@ struct aq_hw_s;
/* global */
/* set global microprocessor semaphore */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
- u32 semaphore);
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
/* get global microprocessor semaphore */
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
/* set global register reset disable */
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
/* set soft reset */
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
/* get soft reset */
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter lsw */
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter lsw */
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter lsw */
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter lsw */
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter msw */
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter msw */
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter msw */
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter msw */
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
-u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx unicast frames counter register */
-u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx multicast frames counter register */
-u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast frames counter register */
-u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast octets counter register 1 */
-u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
-u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get rx dma statistics counter 7 */
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
/* get msm tx errors counter register */
-u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx unicast frames counter register */
-u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast frames counter register */
-u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast frames counter register */
-u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast octets counter register 1 */
-u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast octets counter register 1 */
-u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx unicast octets counter register 0 */
-u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get global mif identification */
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
/* interrupt */
/* set interrupt auto mask lsw */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw);
/* set interrupt mapping enable rx */
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx);
/* set interrupt mapping enable tx */
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx);
/* set interrupt mapping rx */
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
/* set interrupt mapping tx */
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
/* set interrupt mask clear lsw */
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw);
/* set interrupt mask set lsw */
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
/* set interrupt register reset disable */
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
/* set interrupt status clear lsw */
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw);
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
/* get interrupt status lsw */
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
/* get reset interrupt */
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
/* rdm */
/* set cpu id */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set rx dca enable */
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
/* set rx dca mode */
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
/* set rx descriptor data buffer size */
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size,
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
u32 descriptor);
/* set rx descriptor dca enable */
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
/* set rx descriptor enable */
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
/* set rx descriptor header splitting */
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting,
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
u32 descriptor);
/* get rx descriptor head pointer */
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx descriptor length */
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
/* set rx descriptor write-back interrupt enable */
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en);
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
/* set rx header dca enable */
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
/* set rx payload dca enable */
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca);
/* set rx descriptor header buffer size */
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
/* set rx descriptor reset */
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
/* Set RDM Interrupt Moderation Enable */
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en);
/* reg */
/* set general interrupt mapping register */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx);
/* get general interrupt status register */
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
/* set interrupt global control register */
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
/* set interrupt throttle register */
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
/* set rx dma descriptor base address lsw */
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
u32 descriptor);
/* set rx dma descriptor base address msw */
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
u32 descriptor);
/* get rx dma descriptor status register */
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx dma descriptor tail pointer register */
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr,
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
u32 descriptor);
/* set rx filter multicast filter mask register */
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
- u32 rx_flr_mcst_flr_msk);
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
/* set rx filter multicast filter register */
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter);
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
/* set rx filter rss control register 1 */
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
- u32 rx_flr_rss_control1);
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
/* Set RX Filter Control Register 2 */
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
/* Set RX Interrupt Moderation Control Register */
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
u32 queue);
/* set tx dma debug control */
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl);
/* set tx dma descriptor base address lsw */
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
u32 descriptor);
/* set tx dma descriptor base address msw */
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
u32 descriptor);
/* set tx dma descriptor tail pointer register */
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr,
- u32 descriptor);
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
/* Set TX Interrupt Moderation Control Register */
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue);
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
/* set global microprocessor scratch pad */
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
- u32 glb_cpu_scratch_scp, u32 scratch_scp);
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp);
/* rpb */
/* set dma system loopback */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
/* set rx traffic class mode */
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode);
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
/* set rx buffer enable */
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
/* set rx buffer high threshold (per tc) */
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
/* set rx buffer low threshold (per tc) */
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
u32 buffer);
/* set rx flow control mode */
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
/* set rx xoff enable (per tc) */
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer);
/* rpf */
/* set l2 broadcast count threshold */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold);
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
/* set l2 broadcast enable */
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
/* set l2 broadcast filter action */
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_flr_act);
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
/* set l2 multicast filter enable */
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter);
/* set l2 promiscuous mode enable */
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en);
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
/* set l2 unicast filter action */
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter);
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter);
/* set l2 unicast filter enable */
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
/* set l2 unicast destination address lsw */
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
u32 filter);
/* set l2 unicast destination address msw */
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
u32 filter);
/* Set L2 Accept all Multicast packets */
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets);
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
/* set user-priority tc mapping */
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc);
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc);
/* set rss key address */
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
/* set rss key write data */
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
/* get rss key write enable */
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss key write enable */
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
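/* Aside (not part of the patch): the four RSS key accessors just renamed
 * form an indirect register write — load the data word, point the address
 * register at the key slot, raise write-enable, then wait for the hardware
 * to clear it. A minimal sketch of that sequence; the exact ordering and
 * the unbounded poll are simplifying assumptions, and the in-tree caller
 * bounds the wait with a timeout helper.
 */
static void example_rss_key_write(struct aq_hw_s *aq_hw, u32 slot, u32 data)
{
	hw_atl_rpf_rss_key_wr_data_set(aq_hw, data);	/* word to write */
	hw_atl_rpf_rss_key_addr_set(aq_hw, slot);	/* target key slot */
	hw_atl_rpf_rss_key_wr_en_set(aq_hw, 1U);	/* start the write */

	/* hardware clears wr_en once the write lands; a real caller
	 * must bound this loop
	 */
	while (hw_atl_rpf_rss_key_wr_en_get(aq_hw))
		;
}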
/* set rss redirection table address */
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_addr);
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
/* set rss redirection table write data */
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data);
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
/* get rss redirection write enable */
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss redirection write enable */
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
/* set tpo to rpf system loopback */
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
- u32 tpo_to_rpf_sys_lbk);
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
/* set vlan inner ethertype */
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
/* set vlan outer ethertype */
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
/* set vlan promiscuous mode enable */
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en);
/* Set VLAN untagged action */
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act);
/* Set VLAN accept untagged packets */
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets);
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets);
/* Set VLAN filter enable */
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter);
/* Set VLAN Filter Action */
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
- u32 filter);
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+ u32 filter);
/* Set VLAN ID Filter */
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter);
/* set ethertype filter enable */
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter);
/* set ethertype user-priority enable */
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter);
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en,
+ u32 filter);
/* set ethertype rx queue enable */
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter);
/* set ethertype rx queue */
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
/* set ethertype user-priority */
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter);
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter);
/* set ethertype management queue */
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
/* set ethertype filter action */
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
- u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
/* set ethertype filter */
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
/* rpo */
/* set ipv4 header checksum offload enable */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set rx descriptor vlan stripping */
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping,
- u32 descriptor);
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
/* set tcp/udp checksum offload enable */
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* Set LRO Patch Optimization Enable. */
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en);
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
/* Set Large Receive Offload Enable */
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
/* Set LRO Q Sessions Limit */
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim);
/* Set LRO Total Descriptor Limit */
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim);
/* Set LRO Min Payload of First Packet */
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt);
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
/* Set LRO Packet Limit */
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
/* Set LRO Max Number of Descriptors */
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_desc_num, u32 lro);
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_desc_num, u32 lro);
/* Set LRO Time Base Divider */
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider);
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
/* Set LRO Inactive Interval */
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval);
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
/* Set LRO Max Coalescing Interval */
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval);
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval);
/* rx */
/* set rx register reset disable */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
/* tdm */
/* set cpu id */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set large send offload enable */
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en);
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
/* set tx descriptor enable */
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor);
/* set tx dca enable */
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
/* set tx dca mode */
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
/* set tx descriptor dca enable */
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca);
/* get tx descriptor head pointer */
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set tx descriptor length */
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor);
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
/* set tx descriptor write-back interrupt enable */
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en);
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
/* set tx descriptor write-back threshold */
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
u32 descriptor);
/* Set TDM Interrupt Moderation Enable */
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en);
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
/* thm */
/* set lso tcp flag of first packet */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt);
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
/* set lso tcp flag of last packet */
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt);
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
/* set lso tcp flag of middle packet */
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt);
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
/* tpb */
/* set tx buffer enable */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
/* set tx buffer high threshold (per tc) */
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer);
/* set tx buffer low threshold (per tc) */
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer);
/* set tx dma system loopback enable */
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
/* set tx packet buffer size (per tc) */
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer);
/* set tx path pad insert enable */
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
/* tpo */
/* set ipv4 header checksum offload enable */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set tcp/udp checksum offload enable */
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* set tx pkt system loopback enable */
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en);
/* tps */
/* set tx packet scheduler data arbitration mode */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode);
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
/* set tx packet scheduler descriptor rate current time reset */
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res);
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
/* set tx packet scheduler descriptor rate limit */
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim);
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
/* set tx packet scheduler descriptor tc arbitration mode */
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler descriptor tc max credit */
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler descriptor tc weight */
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
/* set tx packet scheduler descriptor vm arbitration mode */
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler tc data max credit */
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler tc data weight */
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
u32 tc);
/* tx */
/* set tx register reset disable */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
/* msm */
/* get register access status */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
/* set register address for indirect address */
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr);
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
/* set register read strobe */
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
/* get register read data */
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
/* set register write data */
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
/* set register write strobe */
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* pci */
/* set pci register reset disable */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
#endif /* HW_ATL_LLH_H */
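
/* Note (illustrative, not part of the patch): the rename is purely
 * mechanical at call sites — every caller switches to the hw_atl_-prefixed
 * name with unchanged arguments. The real call sites live in the hw_atl_a0
 * and hw_atl_b0 sources updated elsewhere in this diff; a minimal
 * before/after sketch:
 */
static void example_enable_rx_buffer(struct aq_hw_s *self)
{
	/* before this patch: rpb_rx_buff_en_set(self, 1U); */
	hw_atl_rpb_rx_buff_en_set(self, 1U);
}
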
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 93450ec930e8..e0cf70120f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -18,91 +18,91 @@
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
*/
-#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4)
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)
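/* Aside: the parameterized macros expand to base + index * stride, so with
 * the declared stride of 0x4, semaphore 3 decodes to 0x3a0 + 3 * 0x4 =
 * 0x3ac. A one-line illustration, assuming the driver's aq_hw_read_reg()
 * helper:
 *
 *	u32 sem3 = aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(3));
 */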
/* register address for bitfield rx dma good octet counter lsw [1f:0] */
-#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
/* register address for bitfield rx dma good packet counter lsw [1f:0] */
-#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
/* register address for bitfield tx dma good octet counter lsw [1f:0] */
-#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
/* register address for bitfield tx dma good packet counter lsw [1f:0] */
-#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
/* register address for bitfield rx dma good octet counter msw [3f:20] */
-#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
/* register address for bitfield rx dma good packet counter msw [3f:20] */
-#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
/* register address for bitfield tx dma good octet counter msw [3f:20] */
-#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
/* register address for bitfield tx dma good packet counter msw [3f:20] */
-#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
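/* Aside (not part of the patch): these DMA statistics are 64-bit counters
 * exposed as lsw/msw register pairs ([1f:0] and [3f:20], i.e. bits 31:0 and
 * 63:32). A sketch of stitching one back together, assuming the driver's
 * aq_hw_read_reg() helper; re-reading the msw guards against a carry
 * between the two halves.
 */
static u64 example_rx_good_octets(struct aq_hw_s *aq_hw)
{
	u64 msw, lsw;

	do {
		msw = aq_hw_read_reg(aq_hw,
				     HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
		lsw = aq_hw_read_reg(aq_hw,
				     HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
	} while (msw != aq_hw_read_reg(aq_hw,
				       HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW));

	return (msw << 32) | lsw;
}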
/* preprocessor definitions for msm rx errors counter register */
-#define mac_msm_rx_errs_cnt_adr 0x00000120u
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
/* preprocessor definitions for msm rx unicast frames counter register */
-#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
/* preprocessor definitions for msm rx multicast frames counter register */
-#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
/* preprocessor definitions for msm rx broadcast frames counter register */
-#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
/* preprocessor definitions for msm rx broadcast octets counter register 1 */
-#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
/* preprocessor definitions for msm rx broadcast octets counter register 2 */
-#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
/* preprocessor definitions for msm rx unicast octets counter register 0 */
-#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
/* preprocessor definitions for rx dma statistics counter 7 */
-#define rx_dma_stat_counter7_adr 0x00006818u
+#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
/* preprocessor definitions for msm tx unicast frames counter register */
-#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
/* preprocessor definitions for msm tx multicast frames counter register */
-#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
/* preprocessor definitions for global mif identification */
-#define glb_mif_id_adr 0x0000001cu
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
/* register address for bitfield iamr_lsw[1f:0] */
-#define itr_iamrlsw_adr 0x00002090
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
/* register address for bitfield rx dma drop packet counter [1f:0] */
-#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
/* register address for bitfield imcr_lsw[1f:0] */
-#define itr_imcrlsw_adr 0x00002070
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
/* register address for bitfield imsr_lsw[1f:0] */
-#define itr_imsrlsw_adr 0x00002060
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
/* register address for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_adr 0x00002300
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
/* bitmask for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
/* lower bit position of bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_shift 29
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
/* register address for bitfield iscr_lsw[1f:0] */
-#define itr_iscrlsw_adr 0x00002050
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
/* register address for bitfield isr_lsw[1f:0] */
-#define itr_isrlsw_adr 0x00002000
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
/* register address for bitfield itr_reset */
-#define itr_res_adr 0x00002300
+#define HW_ATL_ITR_RES_ADR 0x00002300
/* bitmask for bitfield itr_reset */
-#define itr_res_msk 0x80000000
+#define HW_ATL_ITR_RES_MSK 0x80000000
/* lower bit position of bitfield itr_reset */
-#define itr_res_shift 31
+#define HW_ATL_ITR_RES_SHIFT 31
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_shift 0
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* rx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -110,17 +110,17 @@
*/
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* bitmask for bitfield dca_en */
-#define rdm_dca_en_msk 0x80000000
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define rdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define rdm_dca_en_shift 31
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define rdm_dca_en_width 1
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define rdm_dca_en_default 0x1
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
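/* Aside: each bitfield carries the same six definitions
 * (ADR/MSK/MSKN/SHIFT/WIDTH/DEFAULT), which feed a read-modify-write on the
 * containing register. A minimal sketch of that pattern, assuming the
 * driver's aq_hw_read_reg()/aq_hw_write_reg() helpers (the in-tree code
 * wraps this in a dedicated bit-write helper):
 */
static void example_set_rdm_dca_en(struct aq_hw_s *aq_hw, u32 val)
{
	u32 reg = aq_hw_read_reg(aq_hw, HW_ATL_RDM_DCA_EN_ADR);

	reg &= HW_ATL_RDM_DCA_EN_MSKN;			/* clear the field */
	reg |= (val << HW_ATL_RDM_DCA_EN_SHIFT) &
	       HW_ATL_RDM_DCA_EN_MSK;			/* write new value */
	aq_hw_write_reg(aq_hw, HW_ATL_RDM_DCA_EN_ADR, reg);
}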
/* rx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -128,17 +128,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_adr 0x00006180
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
/* bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_msk 0x0000000f
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_shift 0
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_width 4
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_default 0x0
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
/* rx desc{d}_data_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
@@ -147,17 +147,18 @@
*/
/* register address for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_msk 0x0000001f
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_mskn 0xffffffe0
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
/* lower bit position of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_shift 0
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
/* width of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_width 5
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
/* default value of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_default 0x0
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
/* rx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -166,17 +167,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_shift 31
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_width 1
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_default 0x0
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
/* rx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -185,17 +186,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_en */
-#define rdm_descden_msk 0x80000000
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define rdm_descden_mskn 0x7fffffff
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define rdm_descden_shift 31
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define rdm_descden_width 1
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define rdm_descden_default 0x0
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
/* rx desc{d}_hdr_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
@@ -204,17 +205,18 @@
*/
/* register address for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_msk 0x00001f00
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_mskn 0xffffe0ff
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_shift 8
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
/* width of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_width 5
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
/* default value of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
/* rx desc{d}_hdr_split bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_split".
@@ -223,17 +225,18 @@
*/
/* register address for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_msk 0x10000000
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
/* inverted bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_mskn 0xefffffff
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
/* lower bit position of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_shift 28
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
/* width of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_width 1
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
/* default value of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
/* rx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -242,15 +245,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_msk 0x00001fff
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_mskn 0xffffe000
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_shift 0
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_width 13
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
/* rx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -259,17 +262,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_msk 0x00001ff8
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_mskn 0xffffe007
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_shift 3
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_width 10
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_default 0x0
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
/* rx desc{d}_reset bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_reset".
@@ -278,17 +281,17 @@
*/
/* register address for bitfield desc{d}_reset */
-#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_msk 0x02000000
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
/* inverted bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_mskn 0xfdffffff
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
/* lower bit position of bitfield desc{d}_reset */
-#define rdm_descdreset_shift 25
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
/* width of bitfield desc{d}_reset */
-#define rdm_descdreset_width 1
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
/* default value of bitfield desc{d}_reset */
-#define rdm_descdreset_default 0x0
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -296,17 +299,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_adr 0x00005a30
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
/* bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_msk 0x00000004
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
/* lower bit position of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_shift 2
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
/* width of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_width 1
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* rx dca{d}_hdr_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_hdr_en".
@@ -315,17 +318,17 @@
*/
/* register address for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_msk 0x40000000
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
/* inverted bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_mskn 0xbfffffff
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
/* lower bit position of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_shift 30
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
/* width of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_width 1
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
/* default value of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_default 0x0
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
/* rx dca{d}_pay_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_pay_en".
@@ -334,17 +337,17 @@
*/
/* register address for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_msk 0x20000000
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
/* inverted bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_mskn 0xdfffffff
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
/* lower bit position of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_shift 29
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
/* width of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_width 1
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
/* default value of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_default 0x0
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
/* RX rdm_int_rim_en Bitfield Definitions
* Preprocessor definitions for the bitfield "rdm_int_rim_en".
@@ -352,51 +355,51 @@
*/
/* Register address for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_adr 0x00005A30
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
/* Bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_msk 0x00000008
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
/* Inverted bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_mskn 0xFFFFFFF7
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
/* Lower bit position of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_shift 3
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
/* Width of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_width 1
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
/* Default value of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_default 0x0
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
/* general interrupt mapping register definitions
* preprocessor definitions for general interrupt mapping register
* base address: 0x00002180
* parameter: regidx {f} | stride size 0x4 | range [0, 3]
*/
-#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
/* general interrupt status register definitions
* preprocessor definitions for general interrupt status register
 * address: 0x000021A4
*/
-#define gen_intr_stat_adr 0x000021A4U
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
/* interrupt global control register definitions
* preprocessor definitions for interrupt global control register
* address: 0x00002300
*/
-#define intr_glb_ctl_adr 0x00002300u
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
/* interrupt throttle register definitions
* preprocessor definitions for interrupt throttle register
* base address: 0x00002800
* parameter: throttle {t} | stride size 0x4 | range [0, 31]
*/
-#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
/* rx dma descriptor base address lsw definitions
* preprocessor definitions for rx dma descriptor base address lsw
* base address: 0x00005b00
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00005b00u + (descriptor) * 0x20)
/* rx dma descriptor base address msw definitions
@@ -404,7 +407,7 @@
* base address: 0x00005b04
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00005b04u + (descriptor) * 0x20)
/* rx dma descriptor status register definitions
@@ -412,46 +415,48 @@
* base address: 0x00005b14
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+ (0x00005b14u + (descriptor) * 0x20)
/* rx dma descriptor tail pointer register definitions
* preprocessor definitions for rx dma descriptor tail pointer register
* base address: 0x00005b10
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00005b10u + (descriptor) * 0x20)
/* rx interrupt moderation control register definitions
* Preprocessor definitions for RX Interrupt Moderation Control Register
* Base Address: 0x00005A40
* Parameter: RIM {R} | stride size 0x4 | range [0, 31]
*/
-#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
/* rx filter multicast filter mask register definitions
* preprocessor definitions for rx filter multicast filter mask register
* address: 0x00005270
*/
-#define rx_flr_mcst_flr_msk_adr 0x00005270u
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
/* rx filter multicast filter register definitions
* preprocessor definitions for rx filter multicast filter register
* base address: 0x00005250
* parameter: filter {f} | stride size 0x4 | range [0, 7]
*/
-#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
/* RX Filter RSS Control Register 1 Definitions
* Preprocessor definitions for RX Filter RSS Control Register 1
* Address: 0x000054C0
*/
-#define rx_flr_rss_control1_adr 0x000054C0u
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
/* RX Filter Control Register 2 Definitions
* Preprocessor definitions for RX Filter Control Register 2
* Address: 0x00005104
*/
-#define rx_flr_control2_adr 0x00005104u
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
/* tx tx dma debug control [1f:0] bitfield definitions
* preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
@@ -459,24 +464,24 @@
*/
/* register address for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_adr 0x00008920
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
/* bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
/* inverted bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
/* lower bit position of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_shift 0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
/* width of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_width 32
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
/* default value of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_default 0x0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
/* tx dma descriptor base address lsw definitions
* preprocessor definitions for tx dma descriptor base address lsw
* base address: 0x00007c00
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00007c00u + (descriptor) * 0x40)
/* tx dma descriptor tail pointer register definitions
@@ -484,7 +489,8 @@
* base address: 0x00007c10
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00007c10u + (descriptor) * 0x40)
/* rx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -492,17 +498,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_adr 0x00005000
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_shift 6
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_width 1
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_default 0x0
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
@@ -510,17 +516,17 @@
*/
/* register address for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_adr 0x00005700
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_msk 0x00000100
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
/* lower bit position of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_shift 8
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
/* width of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_width 1
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
/* default value of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_default 0x0
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
/* rx rx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "rx_buf_en".
@@ -528,17 +534,17 @@
*/
/* register address for bitfield rx_buf_en */
-#define rpb_rx_buf_en_adr 0x00005700
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
/* bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_msk 0x00000001
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_mskn 0xfffffffe
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield rx_buf_en */
-#define rpb_rx_buf_en_shift 0
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
/* width of bitfield rx_buf_en */
-#define rpb_rx_buf_en_width 1
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
/* default value of bitfield rx_buf_en */
-#define rpb_rx_buf_en_default 0x0
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
/* rx rx{b}_hi_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
@@ -547,17 +553,17 @@
*/
/* register address for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_msk 0x3fff0000
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_mskn 0xc000ffff
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_shift 16
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
/* width of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_width 14
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
/* default value of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_default 0x0
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
/* rx rx{b}_lo_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
@@ -566,17 +572,17 @@
*/
/* register address for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_msk 0x00003fff
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_mskn 0xffffc000
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_shift 0
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
/* width of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_width 14
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
/* default value of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_default 0x0
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
/* rx rx_fc_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
@@ -584,17 +590,17 @@
*/
/* register address for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_adr 0x00005700
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_msk 0x00000030
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
/* inverted bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_mskn 0xffffffcf
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
/* lower bit position of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_shift 4
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
/* width of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_width 2
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
/* default value of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_default 0x0
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
/* rx rx{b}_buf_size[8:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
@@ -603,17 +609,17 @@
*/
/* register address for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_msk 0x000001ff
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_mskn 0xfffffe00
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
/* lower bit position of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_shift 0
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
/* width of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_width 9
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
/* default value of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_default 0x0
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
/* rx rx{b}_xoff_en bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_xoff_en".
@@ -622,17 +628,17 @@
*/
/* register address for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_msk 0x80000000
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
/* inverted bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_mskn 0x7fffffff
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
/* lower bit position of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_shift 31
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
/* width of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_width 1
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
/* default value of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_default 0x0
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
/* rx l2_bc_thresh[f:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
@@ -640,17 +646,17 @@
*/
/* register address for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_adr 0x00005100
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
/* bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_msk 0xffff0000
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_mskn 0x0000ffff
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
/* lower bit position of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_shift 16
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
/* width of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_width 16
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
/* default value of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_default 0x0
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
/* rx l2_bc_en bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_en".
@@ -658,17 +664,17 @@
*/
/* register address for bitfield l2_bc_en */
-#define rpfl2bc_en_adr 0x00005100
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
/* bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_msk 0x00000001
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
/* inverted bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_mskn 0xfffffffe
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l2_bc_en */
-#define rpfl2bc_en_shift 0
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
/* width of bitfield l2_bc_en */
-#define rpfl2bc_en_width 1
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
/* default value of bitfield l2_bc_en */
-#define rpfl2bc_en_default 0x0
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
/* rx l2_bc_act[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_act[2:0]".
@@ -676,17 +682,17 @@
*/
/* register address for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_adr 0x00005100
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
/* bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_msk 0x00007000
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
/* inverted bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_mskn 0xffff8fff
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
/* lower bit position of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_shift 12
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
/* width of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_width 3
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
/* default value of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_default 0x0
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
/* rx l2_mc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_mc_en{f}".
@@ -695,17 +701,17 @@
*/
/* register address for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
/* bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_msk 0x80000000
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_shift 31
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
/* width of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_width 1
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
/* default value of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_default 0x0
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
/* rx l2_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "l2_promis_mode".
@@ -713,17 +719,17 @@
*/
/* register address for bitfield l2_promis_mode */
-#define rpfl2promis_mode_adr 0x00005100
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
/* bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_msk 0x00000008
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
/* inverted bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_mskn 0xfffffff7
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
/* lower bit position of bitfield l2_promis_mode */
-#define rpfl2promis_mode_shift 3
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
/* width of bitfield l2_promis_mode */
-#define rpfl2promis_mode_width 1
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
/* default value of bitfield l2_promis_mode */
-#define rpfl2promis_mode_default 0x0
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
/* rx l2_uc_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
@@ -732,17 +738,17 @@
*/
/* register address for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_msk 0x00070000
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_mskn 0xfff8ffff
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_shift 16
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
/* width of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_width 3
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
/* default value of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_default 0x0
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
/* rx l2_uc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_en{f}".
@@ -751,26 +757,26 @@
*/
/* register address for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_msk 0x80000000
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_shift 31
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
/* width of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_width 1
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
/* default value of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_default 0x0
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
-#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
/* register address for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_msk 0x0000ffff
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_shift 0
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
/* rx l2_mc_accept_all bitfield definitions
* Preprocessor definitions for the bitfield "l2_mc_accept_all".
@@ -778,22 +784,22 @@
*/
/* Register address for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_adr 0x00005270
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
/* Bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_msk 0x00004000
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
/* Inverted bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
/* Lower bit position of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_shift 14
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
/* Width of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_width 1
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
/* Default value of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_default 0x0
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
/* width of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_width 3
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
/* default value of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_default 0x0
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
/* rx rss_key_addr[4:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_addr[4:0]".
@@ -801,17 +807,17 @@
*/
/* register address for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
/* bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_msk 0x0000001f
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
/* inverted bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_mskn 0xffffffe0
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
/* lower bit position of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_shift 0
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
/* width of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_width 5
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
/* default value of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_default 0x0
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
/* rx rss_key_wr_data[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
@@ -819,17 +825,17 @@
*/
/* register address for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_adr 0x000054d4
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
/* bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_msk 0xffffffff
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_mskn 0x00000000
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_shift 0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
/* width of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_width 32
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
/* default value of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
/* rx rss_key_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_en_i".
@@ -837,17 +843,17 @@
*/
/* register address for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
/* bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_msk 0x00000020
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
/* inverted bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
/* lower bit position of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_shift 5
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
/* width of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_width 1
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
/* default value of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
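/* Editor's illustration, not part of the patch: RSS_KEY_ADDR,
 * RSS_KEY_WR_DATA and RSS_KEY_WR_ENI together form a small indirect write
 * port for the RSS key. A sketch of the programming sequence, reusing the
 * hypothetical hw_field_set() above, assuming <linux/delay.h> and
 * <linux/errno.h>, and assuming the write-enable bit self-clears once the
 * dword is latched (the polling loop is that assumption made explicit):
 */
static inline int rss_key_write_dword(void __iomem *base, u32 slot, u32 dword)
{
	int budget = 1000;

	writel(dword, base + HW_ATL_RPF_RSS_KEY_WR_DATA_ADR);	/* payload */
	hw_field_set(base, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
		     HW_ATL_RPF_RSS_KEY_ADDR_MSK,
		     HW_ATL_RPF_RSS_KEY_ADDR_SHIFT, slot);	/* key slot */
	hw_field_set(base, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
		     HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
		     HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT, 1U);	/* kick write */
	while (budget--) {
		if (!(readl(base + HW_ATL_RPF_RSS_KEY_WR_ENI_ADR) &
		      HW_ATL_RPF_RSS_KEY_WR_ENI_MSK))
			return 0;	/* write latched */
		udelay(10);
	}
	return -ETIMEDOUT;
}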
/* rx rss_redir_addr[3:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
@@ -855,17 +861,17 @@
*/
/* register address for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
/* bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_msk 0x0000000f
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
/* inverted bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_mskn 0xfffffff0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
/* lower bit position of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_shift 0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
/* width of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_width 4
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
/* default value of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
/* rx rss_redir_wr_data[f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
@@ -873,17 +879,17 @@
*/
/* register address for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_adr 0x000054e4
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
/* bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_msk 0x0000ffff
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_mskn 0xffff0000
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
/* lower bit position of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_shift 0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
/* width of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_width 16
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
/* default value of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
/* rx rss_redir_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_en_i".
@@ -891,17 +897,17 @@
*/
/* register address for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
/* bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_msk 0x00000010
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
/* inverted bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
/* lower bit position of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_shift 4
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
/* width of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_width 1
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
/* default value of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
/* rx tpo_rpf_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
@@ -909,17 +915,17 @@
*/
/* register address for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
/* lower bit position of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_shift 8
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
/* width of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_width 1
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
/* default value of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_default 0x0
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
/* rx vl_inner_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
@@ -927,17 +933,17 @@
*/
/* register address for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_msk 0x0000ffff
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_mskn 0xffff0000
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
/* lower bit position of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_shift 0
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
/* width of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_width 16
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
/* default value of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_default 0x8100
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
/* rx vl_outer_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
@@ -945,17 +951,17 @@
*/
/* register address for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_msk 0xffff0000
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_mskn 0x0000ffff
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
/* lower bit position of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_shift 16
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
/* width of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_width 16
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
/* default value of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_default 0x88a8
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
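/* Editor's note, not part of the patch: the defaults above are the standard
 * TPIDs: 0x8100 is the IEEE 802.1Q (customer) tag and 0x88A8 the 802.1ad
 * (service, QinQ) tag, so out of reset the parser recognizes both ordinary
 * and provider-bridged VLAN frames.
 */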
/* rx vl_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "vl_promis_mode".
@@ -963,17 +969,17 @@
*/
/* register address for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
/* bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_msk 0x00000002
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
/* inverted bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_mskn 0xfffffffd
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
/* lower bit position of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_shift 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
/* width of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_width 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
/* default value of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_default 0x0
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
/* RX vl_accept_untagged_mode Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
@@ -981,17 +987,17 @@
*/
/* Register address for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
/* Bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_msk 0x00000004
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
/* Inverted bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
/* Lower bit position of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_shift 2
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
/* Width of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_width 1
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
/* Default value of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_default 0x0
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
/* RX vl_untagged_act[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
@@ -999,17 +1005,17 @@
*/
/* Register address for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_adr 0x00005280
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
/* Bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_msk 0x00000038
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
/* Lower bit position of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_shift 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
/* Width of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_width 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
/* Default value of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_default 0x0
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
/* RX vl_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_en{F}".
@@ -1018,17 +1024,17 @@
*/
/* Register address for bitfield vl_en{F} */
-#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_msk 0x80000000
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield vl_en{F} */
-#define rpf_vl_en_f_shift 31
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
/* Width of bitfield vl_en{F} */
-#define rpf_vl_en_f_width 1
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
/* Default value of bitfield vl_en{F} */
-#define rpf_vl_en_f_default 0x0
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
/* RX vl_act{F}[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
@@ -1037,17 +1043,17 @@
*/
/* Register address for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_msk 0x00070000
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
/* Inverted bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_mskn 0xFFF8FFFF
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
/* Lower bit position of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_shift 16
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
/* Width of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_width 3
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
/* Default value of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_default 0x0
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
/* RX vl_id{F}[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
@@ -1056,17 +1062,17 @@
*/
/* Register address for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_msk 0x00000FFF
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
/* Inverted bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_mskn 0xFFFFF000
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
/* Lower bit position of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_shift 0
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
/* Width of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_width 12
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
/* Default value of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_default 0x0
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
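/* Editor's illustration, not part of the patch: vl_en{F}, vl_act{F} and
 * vl_id{F} all live in the same per-filter register (0x5290 + F * 0x4), so
 * a complete VLAN filter entry can be composed in a single value. Sketch
 * only; the action encoding itself is not defined in this header.
 */
static inline u32 rpf_vlan_fltr_word(u32 act, u16 vid)
{
	return HW_ATL_RPF_VL_EN_F_MSK |			/* enable the filter */
	       ((act << HW_ATL_RPF_VL_ACT_F_SHIFT) &
		HW_ATL_RPF_VL_ACT_F_MSK) |		/* action[2:0] */
	       ((u32)vid & HW_ATL_RPF_VL_ID_F_MSK);	/* vlan id[11:0] */
}
/* writel(rpf_vlan_fltr_word(act, vid), base + HW_ATL_RPF_VL_EN_F_ADR(f)); */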
/* RX et_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "et_en{F}".
@@ -1075,17 +1081,17 @@
*/
/* Register address for bitfield et_en{F} */
-#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
/* Bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_msk 0x80000000
+#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield et_en{F} */
-#define rpf_et_en_f_shift 31
+#define HW_ATL_RPF_ET_EN_F_SHIFT 31
/* Width of bitfield et_en{F} */
-#define rpf_et_en_f_width 1
+#define HW_ATL_RPF_ET_EN_F_WIDTH 1
/* Default value of bitfield et_en{F} */
-#define rpf_et_en_f_default 0x0
+#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1094,17 +1100,17 @@
*/
/* register address for bitfield et_en{f} */
-#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_en{f} */
-#define rpf_et_enf_msk 0x80000000
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
/* inverted bitmask for bitfield et_en{f} */
-#define rpf_et_enf_mskn 0x7fffffff
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield et_en{f} */
-#define rpf_et_enf_shift 31
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
/* width of bitfield et_en{f} */
-#define rpf_et_enf_width 1
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
/* default value of bitfield et_en{f} */
-#define rpf_et_enf_default 0x0
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
/* rx et_up{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}_en".
@@ -1113,17 +1119,17 @@
*/
/* register address for bitfield et_up{f}_en */
-#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_msk 0x40000000
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
/* inverted bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_mskn 0xbfffffff
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
/* lower bit position of bitfield et_up{f}_en */
-#define rpf_et_upfen_shift 30
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
/* width of bitfield et_up{f}_en */
-#define rpf_et_upfen_width 1
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
/* default value of bitfield et_up{f}_en */
-#define rpf_et_upfen_default 0x0
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
/* rx et_rxq{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}_en".
@@ -1132,17 +1138,17 @@
*/
/* register address for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_msk 0x20000000
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
/* inverted bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_mskn 0xdfffffff
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
/* lower bit position of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_shift 29
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
/* width of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_width 1
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
/* default value of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_default 0x0
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
/* rx et_up{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}[2:0]".
@@ -1151,17 +1157,17 @@
*/
/* register address for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_msk 0x1c000000
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
/* inverted bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_mskn 0xe3ffffff
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
/* lower bit position of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_shift 26
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
/* width of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_width 3
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
/* default value of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_default 0x0
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
/* rx et_rxq{f}[4:0] bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
@@ -1170,17 +1176,17 @@
*/
/* register address for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_msk 0x01f00000
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
/* inverted bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_mskn 0xfe0fffff
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
/* lower bit position of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_shift 20
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
/* width of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_width 5
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
/* default value of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_default 0x0
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
/* rx et_mng_rxq{f} bitfield definitions
* preprocessor definitions for the bitfield "et_mng_rxq{f}".
@@ -1189,17 +1195,17 @@
*/
/* register address for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_msk 0x00080000
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
/* inverted bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
/* lower bit position of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_shift 19
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
/* width of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_width 1
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
/* default value of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_default 0x0
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
/* rx et_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_act{f}[2:0]".
@@ -1208,17 +1214,17 @@
*/
/* register address for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_msk 0x00070000
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_mskn 0xfff8ffff
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_shift 16
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
/* width of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_width 3
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
/* default value of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_default 0x0
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
/* rx et_val{f}[f:0] bitfield definitions
* preprocessor definitions for the bitfield "et_val{f}[f:0]".
@@ -1227,17 +1233,17 @@
*/
/* register address for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_msk 0x0000ffff
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
/* inverted bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_mskn 0xffff0000
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
/* lower bit position of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_shift 0
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
/* width of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_width 16
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
/* default value of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_default 0x0
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1245,17 +1251,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_adr 0x00005580
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
/* bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_shift 1
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_width 1
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_default 0x0
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
/* rx desc{d}_vl_strip bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_vl_strip".
@@ -1264,17 +1270,18 @@
*/
/* register address for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_msk 0x20000000
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
/* inverted bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_mskn 0xdfffffff
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
/* lower bit position of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_shift 29
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
/* width of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_width 1
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
/* default value of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_default 0x0
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
/* rx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1282,17 +1289,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define rpol4chk_en_adr 0x00005580
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
/* bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_msk 0x00000001
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define rpol4chk_en_shift 0
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define rpol4chk_en_width 1
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define rpol4chk_en_default 0x0
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
/* rx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -1300,17 +1307,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_adr 0x00005000
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
/* bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_shift 29
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_width 1
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_default 0x1
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
/* tx dca{d}_cpuid[7:0] bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
@@ -1319,17 +1326,17 @@
*/
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_mskn 0xffffff00
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_shift 0
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
/* width of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_width 8
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
/* default value of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_default 0x0
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
/* tx lso_en[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_en[1f:0]".
@@ -1337,17 +1344,17 @@
*/
/* register address for bitfield lso_en[1f:0] */
-#define tdm_lso_en_adr 0x00007810
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
/* bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_msk 0xffffffff
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
/* inverted bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_mskn 0x00000000
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
/* lower bit position of bitfield lso_en[1f:0] */
-#define tdm_lso_en_shift 0
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
/* width of bitfield lso_en[1f:0] */
-#define tdm_lso_en_width 32
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
/* default value of bitfield lso_en[1f:0] */
-#define tdm_lso_en_default 0x0
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
/* tx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -1355,17 +1362,17 @@
*/
/* register address for bitfield dca_en */
-#define tdm_dca_en_adr 0x00008480
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
/* bitmask for bitfield dca_en */
-#define tdm_dca_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define tdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define tdm_dca_en_shift 31
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define tdm_dca_en_width 1
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define tdm_dca_en_default 0x1
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
/* tx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -1373,17 +1380,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_adr 0x00008480
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
/* bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_msk 0x0000000f
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_shift 0
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_width 4
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_default 0x0
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
/* tx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -1392,17 +1399,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_shift 31
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_width 1
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_default 0x0
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
/* tx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -1411,17 +1418,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_en */
-#define tdm_descden_msk 0x80000000
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define tdm_descden_mskn 0x7fffffff
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define tdm_descden_shift 31
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define tdm_descden_width 1
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define tdm_descden_default 0x0
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
/* tx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -1430,15 +1437,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_msk 0x00001fff
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_mskn 0xffffe000
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_shift 0
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_width 13
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
/* tx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -1447,17 +1454,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_msk 0x00001ff8
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_mskn 0xffffe007
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_shift 3
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_width 10
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_default 0x0
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
/* tx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -1465,17 +1472,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_adr 0x00007b40
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
/* bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_msk 0x00000002
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
/* lower bit position of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_shift 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
/* width of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_width 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
@@ -1484,17 +1491,18 @@
*/
/* register address for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+ (0x00007c18 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_msk 0x00007f00
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_mskn 0xffff80ff
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_shift 8
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
/* width of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_width 7
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
/* default value of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
/* tx lso_tcp_flag_first[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
@@ -1502,17 +1510,17 @@
*/
/* register address for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
/* width of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
/* default value of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
/* tx lso_tcp_flag_last[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
@@ -1520,17 +1528,17 @@
*/
/* register address for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_adr 0x00007824
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
/* bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
/* width of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
/* default value of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
/* tx lso_tcp_flag_mid[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]".
@@ -1538,17 +1546,17 @@
*/
/* Register address for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_adr 0x00005598
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
/* Bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_mskn 0x00000000
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
/* Lower bit position of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_shift 0
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
/* Width of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_width 32
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
/* Default value of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_default 0x0
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
/* RX lro_en[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_en[1F:0]".
@@ -1556,17 +1564,17 @@
*/
/* Register address for bitfield lro_en[1F:0] */
-#define rpo_lro_en_adr 0x00005590
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
/* Bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_mskn 0x00000000
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
/* Lower bit position of bitfield lro_en[1F:0] */
-#define rpo_lro_en_shift 0
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
/* Width of bitfield lro_en[1F:0] */
-#define rpo_lro_en_width 32
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
/* Default value of bitfield lro_en[1F:0] */
-#define rpo_lro_en_default 0x0
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
/* RX lro_ptopt_en Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ptopt_en".
@@ -1574,17 +1582,17 @@
*/
/* Register address for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_adr 0x00005594
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
/* Bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_msk 0x00008000
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
/* Inverted bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
/* Lower bit position of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_shift 15
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
/* Width of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_width 1
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
/* Default value of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_defalt 0x1
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFALT 0x1
/* RX lro_q_ses_lmt Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_q_ses_lmt".
@@ -1592,17 +1600,17 @@
*/
/* Register address for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_msk 0x00003000
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
/* Inverted bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
/* Lower bit position of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_shift 12
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
/* Width of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_width 2
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
/* Default value of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_default 0x1
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
@@ -1610,17 +1618,17 @@
*/
/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_shift 5
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
/* Width of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_width 2
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_defalt 0x1
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFALT 0x1
/* RX lro_pkt_min[4:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
@@ -1628,22 +1636,22 @@
*/
/* Register address for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_adr 0x00005594
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
/* Bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_msk 0x0000001F
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
/* Lower bit position of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_shift 0
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
/* Width of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_width 5
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
/* Default value of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_default 0x8
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
/* Width of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_width 2
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
/* Default value of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_default 0x0
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
/* RX lro_tb_div[11:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
@@ -1651,17 +1659,17 @@
*/
/* Register address for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_adr 0x00005620
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
/* Bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_msk 0xFFF00000
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
/* Inverted bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_mskn 0x000FFFFF
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
/* Lower bit position of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_shift 20
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
/* Width of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_width 12
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
/* Default value of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_default 0xC35
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
/* RX lro_ina_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
@@ -1669,17 +1677,17 @@
*/
/* Register address for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_msk 0x000FFC00
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_mskn 0xFFF003FF
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
/* Lower bit position of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_shift 10
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
/* Width of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_width 10
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
/* Default value of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_default 0xA
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
/* RX lro_max_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
@@ -1687,17 +1695,17 @@
*/
/* Register address for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_msk 0x000003FF
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
/* Inverted bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_mskn 0xFFFFFC00
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
/* Lower bit position of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_shift 0
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
/* Width of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_width 10
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
/* Default value of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_default 0x19
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
/* TX dca{D}_cpuid[7:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
@@ -1706,17 +1714,17 @@
*/
/* Register address for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_msk 0x000000FF
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_shift 0
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
/* Width of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_width 8
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
/* Default value of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_default 0x0
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
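/* Editor's note: the parameterized ADR macros are plain stride math, e.g.
 * HW_ATL_TDM_DCA_DCPUID_ADR(0) = 0x8400, (1) = 0x8404, (31) = 0x847C.
 * A hedged one-liner using the aq_hw_write_reg_bit() helper that this
 * patch calls verbatim elsewhere (signature: self, addr, msk, shift, val):
 *
 *	aq_hw_write_reg_bit(self, HW_ATL_TDM_DCA_DCPUID_ADR(dca),
 *			    HW_ATL_TDM_DCA_DCPUID_MSK,
 *			    HW_ATL_TDM_DCA_DCPUID_SHIFT, cpuid);
 */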
/* TX dca{D}_desc_en Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_desc_en".
@@ -1725,17 +1733,17 @@
*/
/* Register address for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
/* Inverted bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_shift 31
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
/* Width of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_width 1
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
/* Default value of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_default 0x0
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
/* TX desc{D}_en Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_en".
@@ -1744,17 +1752,17 @@
*/
/* Register address for bitfield desc{D}_en */
-#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_msk 0x80000000
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
/* Inverted bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield desc{D}_en */
-#define tdm_desc_den_shift 31
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
/* Width of bitfield desc{D}_en */
-#define tdm_desc_den_width 1
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
/* Default value of bitfield desc{D}_en */
-#define tdm_desc_den_default 0x0
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
/* TX desc{D}_hd[C:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
@@ -1763,15 +1771,15 @@
*/
/* Register address for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_msk 0x00001FFF
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_mskn 0xFFFFE000
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
/* Lower bit position of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_shift 0
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
/* Width of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_width 13
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
/* TX desc{D}_len[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
@@ -1780,17 +1788,17 @@
*/
/* Register address for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_msk 0x00001FF8
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
/* Inverted bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_mskn 0xFFFFE007
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
/* Lower bit position of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_shift 3
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
/* Width of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_width 10
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
/* Default value of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_default 0x0
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
@@ -1799,18 +1807,18 @@
*/
/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_adr(descriptor) \
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
(0x00007C18 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_msk 0x00007F00
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_shift 8
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
/* Width of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_width 7
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
/* TX tdm_int_mod_en Bitfield Definitions
* Preprocessor definitions for the bitfield "tdm_int_mod_en".
@@ -1818,34 +1826,34 @@
*/
/* Register address for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_adr 0x00007B40
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
/* Bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_msk 0x00000010
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
/* Inverted bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_mskn 0xFFFFFFEF
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
/* Lower bit position of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_shift 4
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
/* Width of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_width 1
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
/* Default value of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_default 0x0
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
* PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
*/
/* register address for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_shift 16
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
/* width of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
/* default value of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
@@ -1853,17 +1861,17 @@
*/
/* register address for bitfield tx_buf_en */
-#define tpb_tx_buf_en_adr 0x00007900
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
/* bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_msk 0x00000001
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_mskn 0xfffffffe
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield tx_buf_en */
-#define tpb_tx_buf_en_shift 0
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
/* width of bitfield tx_buf_en */
-#define tpb_tx_buf_en_width 1
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
/* default value of bitfield tx_buf_en */
-#define tpb_tx_buf_en_default 0x0
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
@@ -1872,17 +1880,17 @@
*/
/* register address for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_msk 0x1fff0000
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_mskn 0xe000ffff
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_shift 16
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
/* width of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_width 13
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
/* default value of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_default 0x0
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
/* tx tx{b}_lo_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
@@ -1891,17 +1899,17 @@
*/
/* register address for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_msk 0x00001fff
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_mskn 0xffffe000
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_shift 0
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
/* width of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_width 13
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
/* default value of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_default 0x0
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
/* tx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -1909,17 +1917,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_adr 0x00007000
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_shift 6
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_width 1
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_default 0x0
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
@@ -1928,17 +1936,17 @@
*/
/* register address for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_msk 0x000000ff
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_mskn 0xffffff00
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
/* lower bit position of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_shift 0
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
/* width of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_width 8
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
/* default value of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_default 0x0
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
/* tx tx_scp_ins_en bitfield definitions
* preprocessor definitions for the bitfield "tx_scp_ins_en".
@@ -1946,17 +1954,17 @@
*/
/* register address for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_adr 0x00007900
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
/* bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_msk 0x00000004
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
/* inverted bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
/* lower bit position of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_shift 2
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
/* width of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_width 1
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
/* default value of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_default 0x0
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1964,17 +1972,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_adr 0x00007800
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
/* bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_shift 1
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_width 1
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_default 0x0
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
/* tx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1982,17 +1990,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define tpol4chk_en_adr 0x00007800
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
/* bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_msk 0x00000001
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define tpol4chk_en_shift 0
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define tpol4chk_en_width 1
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define tpol4chk_en_default 0x0
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
/* tx pkt_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "pkt_sys_loopback".
@@ -2000,17 +2008,17 @@
*/
/* register address for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_adr 0x00007000
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_msk 0x00000080
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
/* inverted bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
/* lower bit position of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_shift 7
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
/* width of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_width 1
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
/* default value of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_default 0x0
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
/* tx data_tc_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "data_tc_arb_mode".
@@ -2018,17 +2026,17 @@
*/
/* register address for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_adr 0x00007100
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
/* bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
/* width of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_width 1
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
/* default value of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
@@ -2036,17 +2044,17 @@
*/
/* register address for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
/* bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_msk 0x80000000
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
/* inverted bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
/* lower bit position of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_shift 31
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
/* width of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_width 1
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
/* default value of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_default 0x0
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
/* tx desc_rate_limit[a:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
@@ -2054,17 +2062,17 @@
*/
/* register address for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
/* bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_msk 0x000007ff
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
/* inverted bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_mskn 0xfffff800
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
/* lower bit position of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_shift 0
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
/* width of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_width 11
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
/* default value of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_default 0x0
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
/* tx desc_tc_arb_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
@@ -2072,17 +2080,17 @@
*/
/* register address for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_adr 0x00007200
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
/* bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_msk 0x00000003
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
/* width of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_width 2
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
/* default value of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
@@ -2091,17 +2099,17 @@
*/
/* register address for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_shift 16
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_width 12
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
/* tx desc_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
@@ -2110,17 +2118,17 @@
*/
/* register address for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_shift 0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
/* width of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_width 9
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
/* default value of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_default 0x0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
/* tx desc_vm_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "desc_vm_arb_mode".
@@ -2128,17 +2136,17 @@
*/
/* register address for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_adr 0x00007300
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
/* bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
/* width of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_width 1
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
/* default value of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
@@ -2147,17 +2155,17 @@
*/
/* register address for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_shift 16
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_width 12
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
@@ -2166,17 +2174,17 @@
*/
/* register address for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_shift 0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
/* width of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_width 9
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
/* default value of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_default 0x0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
/* tx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2184,17 +2192,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_adr 0x00007000
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
/* bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_shift 29
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_width 1
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_default 0x1
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
/* mac_phy register access busy bitfield definitions
* preprocessor definitions for the bitfield "register access busy".
@@ -2202,15 +2210,15 @@
*/
/* register address for bitfield register access busy */
-#define msm_reg_access_busy_adr 0x00004400
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
/* bitmask for bitfield register access busy */
-#define msm_reg_access_busy_msk 0x00001000
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
/* inverted bitmask for bitfield register access busy */
-#define msm_reg_access_busy_mskn 0xffffefff
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
/* lower bit position of bitfield register access busy */
-#define msm_reg_access_busy_shift 12
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
/* width of bitfield register access busy */
-#define msm_reg_access_busy_width 1
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
/* mac_phy msm register address[7:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register address[7:0]".
@@ -2218,17 +2226,17 @@
*/
/* register address for bitfield msm register address[7:0] */
-#define msm_reg_addr_adr 0x00004400
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
/* bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_msk 0x000000ff
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
/* inverted bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_mskn 0xffffff00
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
/* lower bit position of bitfield msm register address[7:0] */
-#define msm_reg_addr_shift 0
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
/* width of bitfield msm register address[7:0] */
-#define msm_reg_addr_width 8
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
/* default value of bitfield msm register address[7:0] */
-#define msm_reg_addr_default 0x0
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
/* mac_phy register read strobe bitfield definitions
* preprocessor definitions for the bitfield "register read strobe".
@@ -2236,17 +2244,17 @@
*/
/* register address for bitfield register read strobe */
-#define msm_reg_rd_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
/* bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_msk 0x00000200
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
/* inverted bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_mskn 0xfffffdff
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
/* lower bit position of bitfield register read strobe */
-#define msm_reg_rd_strobe_shift 9
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
/* width of bitfield register read strobe */
-#define msm_reg_rd_strobe_width 1
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
/* default value of bitfield register read strobe */
-#define msm_reg_rd_strobe_default 0x0
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
/* mac_phy msm register read data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register read data[31:0]".
@@ -2254,15 +2262,15 @@
*/
/* register address for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_adr 0x00004408
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
/* bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_shift 0
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
/* width of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_width 32
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
/* mac_phy msm register write data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register write data[31:0]".
@@ -2270,17 +2278,17 @@
*/
/* register address for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_adr 0x00004404
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
/* bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_shift 0
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
/* width of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_width 32
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
/* default value of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_default 0x0
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
/* mac_phy register write strobe bitfield definitions
* preprocessor definitions for the bitfield "register write strobe".
@@ -2288,17 +2296,17 @@
*/
/* register address for bitfield register write strobe */
-#define msm_reg_wr_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
/* bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_msk 0x00000100
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
/* inverted bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_mskn 0xfffffeff
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
/* lower bit position of bitfield register write strobe */
-#define msm_reg_wr_strobe_shift 8
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
/* width of bitfield register write strobe */
-#define msm_reg_wr_strobe_width 1
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
/* default value of bitfield register write strobe */
-#define msm_reg_wr_strobe_default 0x0
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
@@ -2306,17 +2314,17 @@
*/
/* register address for bitfield soft reset */
-#define glb_soft_res_adr 0x00000000
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
/* bitmask for bitfield soft reset */
-#define glb_soft_res_msk 0x00008000
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
/* inverted bitmask for bitfield soft reset */
-#define glb_soft_res_mskn 0xffff7fff
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
/* lower bit position of bitfield soft reset */
-#define glb_soft_res_shift 15
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
/* width of bitfield soft reset */
-#define glb_soft_res_width 1
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
/* default value of bitfield soft reset */
-#define glb_soft_res_default 0x0
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
/* mif register reset disable bitfield definitions
* preprocessor definitions for the bitfield "register reset disable".
@@ -2324,27 +2332,27 @@
*/
/* register address for bitfield register reset disable */
-#define glb_reg_res_dis_adr 0x00000000
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
/* bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_msk 0x00004000
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
/* inverted bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_mskn 0xffffbfff
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
/* lower bit position of bitfield register reset disable */
-#define glb_reg_res_dis_shift 14
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
/* width of bitfield register reset disable */
-#define glb_reg_res_dis_width 1
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
/* default value of bitfield register reset disable */
-#define glb_reg_res_dis_default 0x1
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
/* tx dma debug control definitions */
-#define tx_dma_debug_ctl_adr 0x00008920u
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
/* tx dma descriptor base address msw definitions */
-#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00007c04u + (descriptor) * 0x40)
/* tx dma total request limit */
-#define tx_dma_total_req_limit_adr 0x00007b20u
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
/* tx interrupt moderation control register definitions
* Preprocessor definitions for TX Interrupt Moderation Control Register
@@ -2352,7 +2360,7 @@
* Parameter: queue {Q} | stride size 0x4 | range [0, 31]
*/
-#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
/* pcie reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2360,22 +2368,23 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_adr 0x00001000
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
/* bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_shift 29
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_width 1
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_default 0x1
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
/* PCI core control register */
-#define pci_reg_control6_adr 0x1014u
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
/* global microprocessor scratch pad definitions */
-#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+ (0x00000300u + (scratch_scp) * 0x4)
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index f2ce12ed4218..967f0fd07fcf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -11,41 +11,244 @@
* abstraction layer.
*/
-#include "../aq_hw.h"
+#include "../aq_nic.h"
#include "../aq_hw_utils.h"
-#include "../aq_pci_func.h"
-#include "../aq_ring.h"
-#include "../aq_vec.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
#include <linux/random.h>
#define HW_ATL_UCP_0X370_REG 0x0370U
#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_MPI_FW_VERSION 0x18
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
#define HW_ATL_MPI_STATE_MSK 0x00FFU
#define HW_ATL_MPI_STATE_SHIFT 0U
-#define HW_ATL_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U
#define HW_ATL_MPI_SPEED_SHIFT 16U
-static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
- u32 *p, u32 cnt)
+#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
+#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
+
+#define HW_ATL_MAC_PHY_CONTROL 0x4000
+#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D
+
+#define HW_ATL_FW_VER_1X 0x01050006U
+#define HW_ATL_FW_VER_2X 0x02000000U
+#define HW_ATL_FW_VER_3X 0x03000000U
+
+#define FORCE_FLASHLESS 0
+
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
- AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
- HW_ATL_FW_SM_RAM) == 1U,
- 1U, 10000U);
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &self->chip_features);
+
+ hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+
+ if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_1x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else {
+ aq_pr_err("Bad FW version detected: %x\n",
+ self->fw_ver_actual);
+ return -EOPNOTSUPP;
+ }
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+ return err;
+}
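/* Editor's sketch: hw_atl_utils_ver_match() is only forward-declared in
 * this hunk. A plausible implementation, assuming the top byte of the
 * version word is the major version (HW_ATL_FW_VER_1X = 0x01050006U
 * suggests a 0xMMmmbbbb layout) -- illustrative, not the shipped code:
 */
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
	if ((ver_expected ^ ver_actual) & 0xFF000000U)
		return -EOPNOTSUPP;	/* major versions differ */
	if ((ver_actual & 0x00FFFFFFU) < (ver_expected & 0x00FFFFFFU))
		return -EOPNOTSUPP;	/* actual build too old */
	return 0;
}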
+
+static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
+{
+ int k = 0;
+ u32 gsr;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ AQ_HW_SLEEP(50);
+
+ /* Cleanup SPI */
+ aq_hw_write_reg(self, 0x534, 0xA0);
+ aq_hw_write_reg(self, 0x100, 0x9F);
+ aq_hw_write_reg(self, 0x100, 0x809F);
+
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ /* Kickstart MAC */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+ aq_hw_write_reg(self, 0x520, 0x1);
+ AQ_HW_SLEEP(10);
+ aq_hw_write_reg(self, 0x404, 0x180e0);
+
+ for (k = 0; k < 1000; k++) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+
+ flb_status = flb_status & 0x10;
+ if (flb_status)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("MAC kickstart failed\n");
+ return -EIO;
+ }
+
+ /* FW reset */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ AQ_HW_SLEEP(50);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+
+ /* Kickstart PHY - skipped */
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
+{
+ u32 gsr, rbl_status;
+ int k;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+
+ /* Alter RBL status */
+ aq_hw_write_reg(self, 0x388, 0xDEAD);
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
+ (gsr & 0xFFFFBFFF) | 0x8000);
+
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0x0);
+
+ aq_hw_write_reg(self, 0x404, 0x40e0);
+
+ /* Wait for RBL boot */
+ for (k = 0; k < 1000; k++) {
+ rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
+ if (rbl_status && rbl_status != 0xDEAD)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (!rbl_status || rbl_status == 0xDEAD) {
+ aq_pr_err("RBL Restart failed");
+ return -EIO;
+ }
+
+ /* Restore NVR */
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0xA0);
+
+ if (rbl_status == 0xF1A7) {
+ aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
+ return -ENOTSUPP;
+ }
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+{
+ int k;
+ u32 boot_exit_code = 0;
+
+ for (k = 0; k < 1000; ++k) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+ boot_exit_code = aq_hw_read_reg(self,
+ HW_ATL_MPI_BOOT_EXIT_CODE);
+ if (flb_status != 0x06000000 || boot_exit_code != 0)
+ break;
+ }
+
+ if (k == 1000) {
+ aq_pr_err("Neither RBL nor FLB firmware started\n");
+ return -EOPNOTSUPP;
+ }
+
+ self->rbl_enabled = (boot_exit_code != 0);
+
+ if (self->rbl_enabled)
+ return hw_atl_utils_soft_reset_rbl(self);
+ else
+ return hw_atl_utils_soft_reset_flb(self);
+}
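/* Editor's note: both reset paths above repeat one bounded-poll idiom
 * (up to 1000 reads, 10 ms apart, ~10 s total). A hypothetical helper
 * equivalent, shown only to make the pattern explicit:
 */
static int hw_atl_poll_reg_nonzero(struct aq_hw_s *self, u32 reg)
{
	int k;

	for (k = 0; k < 1000; k++) {
		if (aq_hw_read_reg(self, reg))
			return 0;	/* e.g. FW version became nonzero */
		AQ_HW_SLEEP(10);
	}
	return -EIO;			/* kickstart failed */
}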
+
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt)
+{
+ int err = 0;
+
+ AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
+ HW_ATL_FW_SM_RAM) == 1U,
+ 1U, 10000U);
if (err < 0) {
bool is_locked;
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -66,7 +269,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
*(p++) = aq_hw_read_reg(self, 0x0000020CU);
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -78,7 +281,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
int err = 0;
bool is_locked;
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -97,7 +300,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
}
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -119,7 +322,7 @@ err_exit:
}
static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ const struct aq_hw_caps_s *aq_hw_caps)
{
int err = 0;
@@ -133,20 +336,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
- reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+ hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
- err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
- aq_hw_read_reg(self, 0x18U));
-
- if (err < 0)
- pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
- AQ_CFG_DRV_NAME,
- aq_hw_caps->fw_ver_expected,
- aq_hw_read_reg(self, 0x18U));
return err;
}
@@ -174,14 +369,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
err = -1;
goto err_exit;
}
- err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
- (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
- sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+ sw.tid = 0xFFFFU & (++self->rpc_tid);
sw.len = (u16)rpc_size;
aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
@@ -199,7 +394,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
- PHAL_ATLANTIC->rpc_tid = sw.tid;
+ self->rpc_tid = sw.tid;
AQ_HW_WAIT_FOR(sw.tid ==
(fw.val =
@@ -221,9 +416,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (fw.len) {
err =
hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->rpc_addr,
+ self->rpc_addr,
(u32 *)(void *)
- &PHAL_ATLANTIC->rpc,
+ &self->rpc,
(fw.len + sizeof(u32) -
sizeof(u8)) /
sizeof(u32));
@@ -231,19 +426,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
- *rpc = &PHAL_ATLANTIC->rpc;
+ *rpc = &self->rpc;
}
err_exit:
return err;
}
-static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
int err = 0;
- err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+ err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
if (err < 0)
goto err_exit;
@@ -259,7 +453,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox_header *pmbox)
{
return hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
}
@@ -270,7 +464,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
int err = 0;
err = hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
if (err < 0)
@@ -281,27 +475,27 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
- pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+ pmbox->stats.dpc = atomic_read(&self->dpc);
} else {
- pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+ pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
}
err_exit:;
}
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
- enum hal_atl_utils_fw_state_e state)
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
- u32 ucp_0x368 = 0;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
- ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state;
- aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368);
+ val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
return 0;
}
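/* Editor's note: the HW_ATL_MPI_SPEED_MSK fix above (0xFFFFU ->
 * 0xFFFF0000U) reflects the register layout: speed occupies bits 31:16,
 * state the low byte. Worked example with illustrative values:
 *   val = 0x00000002 (some state), speed = 0x0001
 *   (val & HW_ATL_MPI_STATE_MSK)       = 0x00000002   keep state
 *   (speed << HW_ATL_MPI_SPEED_SHIFT)  = 0x00010000   new speed
 *   written value                      = 0x00010002
 * With the old 0xFFFFU mask, set_state() below would have preserved the
 * low (state) half instead of the speed half and dropped the configured
 * speed on every call.
 */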
void hw_atl_utils_mpi_set(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state, u32 speed)
+ enum hal_atl_utils_fw_state_e state,
+ u32 speed)
{
int err = 0;
u32 transaction_id = 0;
@@ -320,11 +514,22 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
goto err_exit;
}
- err = hw_atl_utils_mpi_set_speed(self, speed, state);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
+ (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
err_exit:;
}
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+
+ val = state | (val & HW_ATL_MPI_SPEED_MSK);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+ return 0;
+}
+
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
@@ -365,7 +570,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
}
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac)
{
int err = 0;
@@ -373,15 +577,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u32 l = 0U;
u32 mac_addr[2];
- self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
-
- err = hw_atl_utils_mpi_create(self, aq_hw_caps);
- if (err < 0)
- goto err_exit;
-
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
unsigned int rnd = 0;
unsigned int ucp_0x370 = 0;
@@ -396,7 +591,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_read_reg(self, 0x00000374U) +
(40U * 4U),
mac_addr,
- AQ_DIMOF(mac_addr));
+ ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -427,7 +622,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
mac[0] = (u8)(0xFFU & h);
}
-err_exit:
return err;
}
@@ -465,7 +659,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
u32 chip_features = 0U;
- u32 val = reg_glb_mif_id_get(self);
+ u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
if ((3U & mif_rev) == 1U) {
@@ -500,13 +694,13 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_atl_s *hw_self = PHAL_ATLANTIC;
struct hw_aq_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
- mbox.stats._N_ - hw_self->last_stats._N_)
+#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
+ mbox.stats._N_ - self->last_stats._N_)
+
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
AQ_SDELTA(mprc);
@@ -527,19 +721,19 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(dpc);
}
#undef AQ_SDELTA
- hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
- hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
- hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
- hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
+ self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
+ self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
- memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+ memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
return 0;
}
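/* Editor's note: AQ_SDELTA accumulates deltas instead of copying raw
 * firmware counters, so driver totals survive firmware counter resets.
 * Expanded for one counter (illustrative numbers):
 *   last_stats.uprc = 100, mbox.stats.uprc = 130
 *   => curr_stats.uprc += 30; the memcpy() then snapshots mbox.stats
 *   into last_stats for the next round.
 */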
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
- return &PHAL_ATLANTIC->curr_stats;
+ return &self->curr_stats;
}
static const u32 hw_atl_utils_hw_mac_regs[] = {
@@ -568,14 +762,14 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
};
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff)
{
unsigned int i = 0U;
for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
regs_buff[i] = aq_hw_read_reg(self,
- hw_atl_utils_hw_mac_regs[i]);
+ hw_atl_utils_hw_mac_regs[i]);
return 0;
}
@@ -584,3 +778,13 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
*fw_version = aq_hw_read_reg(self, 0x18U);
return 0;
}
+
+const struct aq_fw_ops aq_fw_1x_ops = {
+ .init = hw_atl_utils_mpi_create,
+ .reset = NULL,
+ .get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .set_link_speed = hw_atl_utils_mpi_set_speed,
+ .set_state = hw_atl_utils_mpi_set_state,
+ .update_link_status = hw_atl_utils_mpi_get_link_status,
+ .update_stats = hw_atl_utils_update_stats,
+};
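/* Editor's sketch: callers no longer branch on the firmware generation;
 * hw_atl_utils_initfw() stores the matching table in self->aq_fw_ops and
 * everything dispatches through it. Usage, with hypothetical local names:
 */
const struct aq_fw_ops *fw_ops = NULL;
int err = hw_atl_utils_initfw(self, &fw_ops);

if (!err)
	err = self->aq_fw_ops->set_link_speed(self, speed);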
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 21aeca6908d3..2c690947910a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -14,10 +14,39 @@
#ifndef HW_ATL_UTILS_H
#define HW_ATL_UTILS_H
-#include "../aq_common.h"
-
#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
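/* Editor's sketch: in hw_atl_txd_s the bit numbers in the ctl2 comment
 * are relative to the 64-bit second quadword, so subtract 32 when
 * packing the u32. Hypothetical packing helper; the field placement is
 * inferred from that comment, not confirmed by this patch:
 */
static inline u32 hw_atl_txd_ctl2_pack(u32 pay_len, bool ctx_en, u32 ctx_idx)
{
	return (pay_len << 14) |		/* bits 63..46 -> ctl2[31:14] */
	       ((ctx_en ? 1U : 0U) << 13) |	/* bit 45 -> ctl2[13] */
	       ((ctx_idx & 1U) << 12);		/* bit 44 -> ctl2[12] */
}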
+
struct __packed hw_atl_stats_s {
u32 uprc;
u32 mprc;
@@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox {
struct hw_atl_stats_s stats;
};
-struct __packed hw_atl_s {
- struct aq_hw_s base;
- struct hw_atl_stats_s last_stats;
- struct aq_stats_s curr_stats;
- u64 speed;
- unsigned int chip_features;
- u32 fw_ver_actual;
- atomic_t dpc;
- u32 mbox_addr;
- u32 rpc_addr;
- u32 rpc_tid;
- struct hw_aq_atl_utils_fw_rpc rpc;
-};
-
-#define SELF ((struct hw_atl_s *)self)
-
-#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
-
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
@@ -154,7 +163,7 @@ struct __packed hw_atl_s {
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
- PHAL_ATLANTIC->chip_features)
+ self->chip_features)
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
@@ -171,6 +180,73 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+enum hw_atl_fw2x_rate {
+ FW2X_RATE_100M = 0x20,
+ FW2X_RATE_1G = 0x100,
+ FW2X_RATE_2G5 = 0x200,
+ FW2X_RATE_5G = 0x400,
+ FW2X_RATE_10G = 0x800,
+};
+
+enum hw_atl_fw2x_caps_lo {
+ CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_FD,
+ CAPS_LO_100BASETX_HD,
+ CAPS_LO_100BASET4_HD,
+ CAPS_LO_100BASET2_HD,
+ CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASET2_FD,
+ CAPS_LO_1000BASET_HD,
+ CAPS_LO_1000BASET_FD,
+ CAPS_LO_2P5GBASET_FD,
+ CAPS_LO_5GBASET_FD,
+ CAPS_LO_10GBASET_FD,
+};
+
+enum hw_atl_fw2x_caps_hi {
+ CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_10BASET_EEE,
+ CAPS_HI_RESERVED2,
+ CAPS_HI_PAUSE,
+ CAPS_HI_ASYMMETRIC_PAUSE,
+ CAPS_HI_100BASETX_EEE,
+ CAPS_HI_RESERVED3,
+ CAPS_HI_RESERVED4,
+ CAPS_HI_1000BASET_FD_EEE,
+ CAPS_HI_2P5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_10GBASET_FD_EEE,
+ CAPS_HI_RESERVED5,
+ CAPS_HI_RESERVED6,
+ CAPS_HI_RESERVED7,
+ CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED9,
+ CAPS_HI_CABLE_DIAG,
+ CAPS_HI_TEMPERATURE,
+ CAPS_HI_DOWNSHIFT,
+ CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_MEDIA_DETECT,
+ CAPS_HI_LINK_DROP,
+ CAPS_HI_SLEEP_PROXY,
+ CAPS_HI_WOL,
+ CAPS_HI_MAC_STOP,
+ CAPS_HI_EXT_LOOPBACK,
+ CAPS_HI_INT_LOOPBACK,
+ CAPS_HI_EFUSE_AGENT,
+ CAPS_HI_WOL_TIMER,
+ CAPS_HI_STATISTICS,
+ CAPS_HI_TRANSACTION_ID,
+};
+
+struct aq_hw_s;
+struct aq_fw_ops;
+struct aq_hw_caps_s;
+struct aq_hw_link_status_s;
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self);
+
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
@@ -183,19 +259,15 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state,
u32 speed);
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
- enum hal_atl_utils_fw_state_e state);
-
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
@@ -208,5 +280,10 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt);
+
+extern const struct aq_fw_ops aq_fw_1x_ops;
+extern const struct aq_fw_ops aq_fw_2x_ops;
#endif /* HW_ATL_UTILS_H */
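The header rework above deletes the hw_atl_s container and its SELF/PHAL_ATLANTIC cast macros; per-chip state such as chip_features, mbox_addr and the stats now lives directly on struct aq_hw_s, which is what the new IS_CHIP_FEATURE() expansion assumes. A minimal sketch of the resulting access pattern (the helper name is illustrative, not part of the patch):

static bool example_chip_has_mips(struct aq_hw_s *self)
{
	/* was: ((struct hw_atl_s *)self)->chip_features via PHAL_ATLANTIC */
	return !!(self->chip_features & HAL_ATLANTIC_UTILS_CHIP_MIPS);
}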
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
new file mode 100644
index 000000000000..8cfce95c82fc
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -0,0 +1,184 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
+ * Atlantic hardware abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+static int aq_fw2x_init(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ /* check up to 10 times, 1 ms apart */
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
+ aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
+ 1000U, 10U);
+ return err;
+}
+
+static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
+{
+ enum hw_atl_fw2x_rate rate = 0;
+
+ if (speed & AQ_NIC_RATE_10G)
+ rate |= FW2X_RATE_10G;
+
+ if (speed & AQ_NIC_RATE_5G)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_5GSR)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_2GS)
+ rate |= FW2X_RATE_2G5;
+
+ if (speed & AQ_NIC_RATE_1G)
+ rate |= FW2X_RATE_1G;
+
+ if (speed & AQ_NIC_RATE_100M)
+ rate |= FW2X_RATE_100M;
+
+ return rate;
+}
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ u32 val = link_speed_mask_2fw2x_ratemask(speed);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val);
+
+ return 0;
+}
+
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ /* No explicit state in 2x fw */
+ return 0;
+}
+
+static int aq_fw2x_update_link_status(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+
+ if (speed) {
+ if (speed & FW2X_RATE_10G)
+ link_status->mbps = 10000;
+ else if (speed & FW2X_RATE_5G)
+ link_status->mbps = 5000;
+ else if (speed & FW2X_RATE_2G5)
+ link_status->mbps = 2500;
+ else if (speed & FW2X_RATE_1G)
+ link_status->mbps = 1000;
+ else if (speed & FW2X_RATE_100M)
+ link_status->mbps = 100;
+ else
+ link_status->mbps = 10000;
+ } else {
+ link_status->mbps = 0;
+ }
+
+ return 0;
+}
+
+int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+ u32 mac_addr[2] = { 0 };
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+
+ if (efuse_addr != 0) {
+ err = hw_atl_utils_fw_downld_dwords(self,
+ efuse_addr + (40U * 4U),
+ mac_addr,
+ ARRAY_SIZE(mac_addr));
+ if (err)
+ return err;
+ mac_addr[0] = __swab32(mac_addr[0]);
+ mac_addr[1] = __swab32(mac_addr[1]);
+ }
+
+ ether_addr_copy(mac, (u8 *)mac_addr);
+
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+ unsigned int rnd = 0;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ l = 0xE3000000U
+ | (0xFFFFU & rnd)
+ | (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+ return err;
+}
+
+static int aq_fw2x_update_stats(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
+
+ /* Toggle statistics bit for FW to update */
+ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ /* Wait for FW to report back */
+ AQ_HW_WAIT_FOR(orig_stats_val !=
+ (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ BIT(CAPS_HI_STATISTICS)),
+ 1U, 10000U);
+ if (err)
+ return err;
+
+ return hw_atl_utils_update_stats(self);
+}
+
+const struct aq_fw_ops aq_fw_2x_ops = {
+ .init = aq_fw2x_init,
+ .reset = NULL,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
+ .update_link_status = aq_fw2x_update_link_status,
+ .update_stats = aq_fw2x_update_stats,
+};
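With both tables defined, firmware handling becomes a vtable dispatch: hw_atl_utils_initfw(), declared in the header above, is expected to select aq_fw_1x_ops or aq_fw_2x_ops for the detected firmware and hand it back through a const pointer. A hedged sketch of a caller (the function name and exact call order are illustrative only):

static int example_fw_bring_up(struct aq_hw_s *self)
{
	const struct aq_fw_ops *fw_ops = NULL;
	int err;

	err = hw_atl_utils_initfw(self, &fw_ops);  /* picks 1.x or 2.x ops */
	if (err)
		return err;

	err = fw_ops->init(self);  /* aq_fw2x_init() on 2.x firmware */
	if (err)
		return err;

	return fw_ops->update_link_status(self);
}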
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 9009f2651e70..5265b937677b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -10,9 +10,9 @@
#ifndef VER_H
#define VER_H
-#define NIC_MAJOR_DRIVER_VERSION 1
-#define NIC_MINOR_DRIVER_VERSION 6
-#define NIC_BUILD_DRIVER_VERSION 13
+#define NIC_MAJOR_DRIVER_VERSION 2
+#define NIC_MINOR_DRIVER_VERSION 0
+#define NIC_BUILD_DRIVER_VERSION 2
#define NIC_REVISION_DRIVER_VERSION 0
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d9346e2ac720..14a59e51db67 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1716,7 +1716,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm63xx_enet_platform_data *pd;
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
- const char *clk_name;
int i, ret;
if (!bcm_enet_shared_base[0])
@@ -1751,20 +1750,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
- priv->mac_id = pdev->id;
- /* get rx & tx dma channel id for this mac */
- if (priv->mac_id == 0) {
- priv->rx_chan = 0;
- priv->tx_chan = 1;
- clk_name = "enet0";
- } else {
- priv->rx_chan = 2;
- priv->tx_chan = 3;
- clk_name = "enet1";
- }
-
- priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
+ priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
goto out;
@@ -1795,9 +1782,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->dma_chan_width = pd->dma_chan_width;
priv->dma_has_sram = pd->dma_has_sram;
priv->dma_desc_shift = pd->dma_desc_shift;
+ priv->rx_chan = pd->rx_chan;
+ priv->tx_chan = pd->tx_chan;
}
- if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+ if (priv->has_phy && !priv->use_external_mii) {
/* using internal PHY, enable clock */
priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
if (IS_ERR(priv->phy_clk)) {
@@ -1828,7 +1817,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
bus->priv = priv;
bus->read = bcm_enet_mdio_read_phylib;
bus->write = bcm_enet_mdio_write_phylib;
- sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
+ sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
/* only probe bus where we think the PHY is, because
* the mdio read operation returns 0 instead of 0xffff
@@ -2139,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
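The two ring-allocation hunks above are the standard dma_alloc_coherent()-plus-memset() to dma_zalloc_coherent() conversion of this kernel era: the allocator hands back zeroed memory, so the explicit memset() is dropped. Condensed shape of the pattern (helper name illustrative; needs <linux/dma-mapping.h>):

static void *example_alloc_desc_ring(struct device *kdev, size_t size,
				     dma_addr_t *dma)
{
	/* equivalent to dma_alloc_coherent() followed by memset(p, 0, size) */
	return dma_zalloc_coherent(kdev, size, dma, GFP_KERNEL);
}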
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 5a66728d4776..1d3c917eb830 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -193,9 +193,6 @@ struct bcm_enet_mib_counters {
struct bcm_enet_priv {
- /* mac id (from platform device id) */
- int mac_id;
-
/* base remapped address of device */
void __iomem *base;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 087f01b4dc3a..f15a8fc6dfc9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
goto out;
}
- /* The Ethernet switch we are interfaced with needs packets to be at
- * least 64 bytes (including FCS) otherwise they will be discarded when
- * they enter the switch port logic. When Broadcom tags are enabled, we
- * need to make sure that packets are at least 68 bytes
- * (including FCS and tag) because the length verification is done after
- * the Broadcom tag is stripped off the ingress packet.
- */
- if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Insert TSB and checksum infos */
if (priv->tsb_en) {
skb = bcm_sysport_insert_tsb(skb, dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 1d96cd594ade..8eef9fb6b1fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
dma_desc->ctl1 = cpu_to_le32(ctl1);
}
-#define ENET_BRCM_TAG_LEN 4
-
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
struct sk_buff *skb)
@@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
u32 flags;
int i;
- /* The Ethernet switch we are interfaced with needs packets to be at
- * least 64 bytes (including FCS) otherwise they will be discarded when
- * they enter the switch port logic. When Broadcom tags are enabled, we
- * need to make sure that packets are at least 68 bytes
- * (including FCS and tag) because the length verification is done after
- * the Broadcom tag is stripped off the ingress packet.
- */
- if (netdev_uses_dsa(net_dev)) {
- if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
- goto err_stats;
- }
-
if (skb->len > BGMAC_DESC_CTL1_LEN) {
netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
goto err_drop;
@@ -240,7 +226,6 @@ err_dma_head:
err_drop:
dev_kfree_skb(skb);
-err_stats:
net_dev->stats.tx_dropped++;
net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
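The bcmsysport and bgmac hunks above remove the same open-coded minimum-length workaround; presumably the padding now happens once in the Broadcom DSA tag path rather than in each MAC driver (an inference from the series, not stated in these hunks). For reference, the semantics the removed code relied on: skb_put_padto() zero-pads the skb up to the requested length and frees it on failure, so a non-zero return means the skb is already consumed:

	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
		return NETDEV_TX_OK;  /* skb freed by the helper */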
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 7919f6112ecf..5e34b34f7740 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5818,8 +5818,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
- struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
- struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnx2_tx_ring_info *txr;
+ struct bnx2_rx_ring_info *rxr;
tx_napi = bnapi;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8ae269ec17a1..d7c98e807ca8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
break;
default:
- WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
- be16_to_cpu(skb->protocol));
+ netdev_WARN_ONCE(bp->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
}
}
#endif
@@ -2482,8 +2483,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
*/
if (bp->dev->features & NETIF_F_LRO)
fp->mode = TPA_MODE_LRO;
- else if (bp->dev->features & NETIF_F_GRO &&
- bnx2x_mtu_allows_gro(bp->dev->mtu))
+ else if (bp->dev->features & NETIF_F_GRO_HW)
fp->mode = TPA_MODE_GRO;
else
fp->mode = TPA_MODE_DISABLED;
@@ -4874,6 +4874,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ if (!bnx2x_mtu_allows_gro(new_mtu))
+ dev->features &= ~NETIF_F_GRO_HW;
+
if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
@@ -4903,10 +4906,13 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
}
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM)) {
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
+ features &= ~NETIF_F_GRO_HW;
+ if (features & NETIF_F_GRO_HW)
features &= ~NETIF_F_LRO;
- features &= ~NETIF_F_GRO;
- }
return features;
}
@@ -4933,13 +4939,8 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
}
}
- /* if GRO is changed while LRO is enabled, don't force a reload */
- if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
- changes &= ~NETIF_F_GRO;
-
- /* if GRO is changed while HW TPA is off, don't force a reload */
- if ((changes & NETIF_F_GRO) && bp->disable_tpa)
- changes &= ~NETIF_F_GRO;
+ /* Don't care about GRO changes */
+ changes &= ~NETIF_F_GRO;
if (changes)
bnx2x_reload = true;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ddd5d3ebd201..7b08323e3f3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12409,8 +12409,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
}
if (CHIP_IS_E1(bp))
@@ -13282,7 +13282,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
@@ -13318,6 +13318,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_LRO)
+ dev->features &= ~NETIF_F_GRO_HW;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 59c8ec9c1cad..7c560d545c03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61ca4eb7c6fa..1500243b9886 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -107,6 +107,7 @@ enum board_idx {
BCM57416_NPAR,
BCM57452,
BCM57454,
+ BCM5745x_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -147,6 +148,7 @@ static const struct {
[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -156,6 +158,8 @@ static const struct {
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
@@ -209,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
HWRM_FUNC_CFG,
+ HWRM_FUNC_VF_CFG,
HWRM_PORT_PHY_QCFG,
HWRM_CFA_L2_FILTER_ALLOC,
};
@@ -1510,7 +1515,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
@@ -1526,7 +1531,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = 1;
}
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
}
cons = rxcmp->rx_cmp_opaque;
@@ -1644,7 +1649,10 @@ next_rx:
rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons);
-next_rx_no_prod:
+ cpr->rx_packets += 1;
+ cpr->rx_bytes += len;
+
+next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
@@ -1706,12 +1714,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (BNXT_VF(bp))
goto async_event_process_exit;
- if (data1 & 0x20000) {
+
+ /* print unsupported speed warning in forced speed mode only */
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
+ (data1 & 0x20000)) {
u16 fw_speed = link_info->force_link_speed;
u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
- netdev_warn(bp->dev, "Link speed %d no longer supported\n",
- speed);
+ if (speed != SPEED_UNKNOWN)
+ netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+ speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
@@ -1798,6 +1810,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
+ cpr->event_ctr++;
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
@@ -2021,6 +2034,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
+ if (bp->flags & BNXT_FLAG_DIM) {
+ struct net_dim_sample dim_sample;
+
+ net_dim_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
mmiowb();
return work_done;
}
@@ -2243,6 +2265,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (rxr->xdp_prog)
bpf_prog_put(rxr->xdp_prog);
+ if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2276,6 +2301,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring = &rxr->rx_ring_struct;
+ rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+ if (rc < 0)
+ return rc;
+
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
@@ -2606,6 +2635,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
}
}
@@ -2751,7 +2782,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
return;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
- if (bp->dev->features & NETIF_F_GRO)
+ else if (bp->dev->features & NETIF_F_GRO_HW)
bp->flags |= BNXT_FLAG_GRO;
}
@@ -2830,6 +2861,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->cp_ring_mask = bp->cp_bit - 1;
}
+/* Changing allocation mode of RX rings.
+ * TODO: Update when extending xdp_rxq_info to support allocation modes.
+ */
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
@@ -2839,10 +2873,10 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
bp->rx_dir = DMA_BIDIRECTIONAL;
bp->rx_skb_func = bnxt_rx_page_skb;
+ /* Disable LRO or GRO_HW */
+ netdev_update_features(bp->dev);
} else {
bp->dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
@@ -4469,6 +4503,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int bnxt_hwrm_get_rings(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return -EIO;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ u16 cp, stats;
+
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ cp = min_t(u16, cp, stats);
+ hw_resc->resv_cp_rings = cp;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
@@ -4488,55 +4558,283 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
-static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
{
struct hwrm_func_cfg_input req = {0};
+ u32 enables = 0;
int rc;
- if (bp->hwrm_spec_code < 0x10601)
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+ }
+ if (!enables)
return 0;
- if (BNXT_VF(bp))
+ req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ bp->hw_resc.resv_tx_rings = tx_rings;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 enables = 0;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
+ }
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
- req.num_tx_rings = cpu_to_le16(*tx_rings);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
+ return -ENOMEM;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
+ int cp, int vnic)
+{
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+ else
+ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int tx = bp->tx_nr_rings;
+ int rx = bp->rx_nr_rings;
+ int cp = bp->cp_nr_rings;
+ int grp, rx_rings, rc;
+ bool sh = false;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+
+ grp = bp->rx_nr_rings;
+ if (tx == hw_resc->resv_tx_rings &&
+ (!(bp->flags & BNXT_FLAG_NEW_RM) ||
+ (rx == hw_resc->resv_rx_rings &&
+ grp == hw_resc->resv_hw_ring_grps &&
+ cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
+ return 0;
+
+ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+ if (rc)
return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
- mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc)
- bp->tx_reserved_rings = *tx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ rx = hw_resc->resv_rx_rings;
+ cp = hw_resc->resv_cp_rings;
+ grp = hw_resc->resv_hw_ring_grps;
+ vnic = hw_resc->resv_vnics;
+ }
+
+ rx_rings = rx;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (rx >= 2) {
+ rx_rings = rx >> 1;
+ } else {
+ if (netif_running(bp->dev))
+ return -ENOMEM;
+
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bnxt_set_ring_params(bp);
+ }
+ }
+ rx_rings = min_t(int, rx_rings, grp);
+ rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx = rx_rings << 1;
+ cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+ bp->tx_nr_rings = tx;
+ bp->rx_nr_rings = rx_rings;
+ bp->cp_nr_rings = cp;
+
+ if (!tx || !rx || !cp || !grp || !vnic)
+ return -ENOMEM;
+
return rc;
}
-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
- struct hwrm_func_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rx = bp->rx_nr_rings;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return false;
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ return true;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (hw_resc->resv_rx_rings != rx ||
+ hw_resc->resv_cp_rings != bp->cp_nr_rings ||
+ hw_resc->resv_vnics != vnic))
+ return true;
+ return false;
+}
+
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 flags, enables;
int rc;
- if (bp->hwrm_spec_code < 0x10801)
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
return 0;
- if (BNXT_VF(bp))
- return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
+
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+ return 0;
+}
+
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ u32 flags, enables;
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
- req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+ enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ }
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
return 0;
}
+static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ if (bp->hwrm_spec_code < 0x10801)
+ return 0;
+
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+ ring_grps, cp_rings);
+
+ return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings);
+}
+
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
@@ -4579,6 +4877,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = cpu_to_le16(flags);
}
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal coal;
+ unsigned int grp_idx;
+
+ /* Tick values in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
+
+ coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
+ coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
+
+ if (!bnapi->rx_ring)
+ return -ENODEV;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+ bnxt_hwrm_set_coal_params(&coal, &req_rx);
+
+ grp_idx = bnapi->index;
+ req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+
+ return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
+ HWRM_CMD_TIMEOUT);
+}
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
@@ -4732,11 +5060,60 @@ func_qcfg_exit:
return rc;
}
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -EIO;
+ goto hwrm_func_resc_qcaps_exit;
+ }
+
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->vf_resv_strategy =
+ le16_to_cpu(resp->vf_reservation_strategy);
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
+ }
+hwrm_func_resc_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u32 flags;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -4746,16 +5123,27 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+ if (!hw_resc->max_hw_ring_grps)
+ hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4763,16 +5151,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!pf->max_hw_ring_grps)
- pf->max_hw_ring_grps = pf->max_tx_rings;
- pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- pf->max_vnics = le16_to_cpu(resp->max_vnics);
- pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
@@ -4781,26 +5159,13 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
-
- vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!vf->max_hw_ring_grps)
- vf->max_hw_ring_grps = vf->max_tx_rings;
- vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- vf->max_vnics = le16_to_cpu(resp->max_vnics);
- vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
-
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
@@ -4810,6 +5175,21 @@ hwrm_func_qcaps_exit:
return rc;
}
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc;
+
+ rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_hwrm_func_resc_qcaps(bp);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+ return 0;
+}
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -4879,23 +5259,24 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
- bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
- resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
- if (resp->hwrm_intf_maj < 1) {
+ bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ if (resp->hwrm_intf_maj_8b < 1) {
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
- resp->hwrm_intf_maj, resp->hwrm_intf_min,
- resp->hwrm_intf_upd);
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
- resp->hwrm_fw_rsvd);
+ resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
+ resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
- if (resp->hwrm_intf_maj >= 1)
+ if (resp->hwrm_intf_maj_8b >= 1)
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
bp->chip_num = le16_to_cpu(resp->chip_num);
@@ -5031,6 +5412,28 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
return rc;
}
+static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+ req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+ if (size == 128)
+ req.cache_linesize =
+ FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5166,15 +5569,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc);
goto err_out;
}
- if (bp->tx_reserved_rings != bp->tx_nr_rings) {
- int tx = bp->tx_nr_rings;
-
- if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
- tx < bp->tx_nr_rings) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
}
rc = bnxt_hwrm_ring_alloc(bp);
@@ -5394,79 +5788,45 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_rsscos_ctxs;
-#endif
- return bp->pf.max_rsscos_ctxs;
+ return bp->hw_resc.max_rsscos_ctxs;
}
static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_vnics;
-#endif
- return bp->pf.max_vnics;
+ return bp->hw_resc.max_vnics;
}
#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_stat_ctxs;
-#endif
- return bp->pf.max_stat_ctxs;
+ return bp->hw_resc.max_stat_ctxs;
}
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_stat_ctxs = max;
- else
-#endif
- bp->pf.max_stat_ctxs = max;
+ bp->hw_resc.max_stat_ctxs = max;
}
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_cp_rings;
-#endif
- return bp->pf.max_cp_rings;
+ return bp->hw_resc.max_cp_rings;
}
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_cp_rings = max;
- else
-#endif
- bp->pf.max_cp_rings = max;
+ bp->hw_resc.max_cp_rings = max;
}
static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return min_t(unsigned int, bp->vf.max_irqs,
- bp->vf.max_cp_rings);
-#endif
- return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_irqs = max_irqs;
- else
-#endif
- bp->pf.max_irqs = max_irqs;
+ bp->hw_resc.max_irqs = max_irqs;
}
static int bnxt_init_msix(struct bnxt *bp)
@@ -5567,6 +5927,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
+static int bnxt_reserve_rings(struct bnxt *bp)
+{
+ int orig_cp = bp->hw_resc.resv_cp_rings;
+ int tcs = netdev_get_num_tc(bp->dev);
+ int rc;
+
+ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+ rc = __bnxt_reserve_rings(bp);
+ if (rc) {
+ netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+ return rc;
+ }
+ if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ return rc;
+ }
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ netdev_err(bp->dev, "tx ring reservation failure\n");
+ netdev_reset_tc(bp->dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ return -ENOMEM;
+ }
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+ return 0;
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -5692,8 +6082,14 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->cp_nr_rings; i++)
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (bp->bnapi[i]->rx_ring)
+ cancel_work_sync(&cpr->dim.work);
+
napi_disable(&bp->bnapi[i]->napi);
+ }
}
static void bnxt_enable_napi(struct bnxt *bp)
@@ -5701,7 +6097,13 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
bp->bnapi[i]->in_reset = false;
+
+ if (bp->bnapi[i]->rx_ring) {
+ INIT_WORK(&cpr->dim.work, bnxt_dim_work);
+ cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
napi_enable(&bp->bnapi[i]->napi);
}
}
@@ -6311,6 +6713,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
+
rc = bnxt_setup_int_mode(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
@@ -6446,23 +6852,13 @@ static bool bnxt_drv_busy(struct bnxt *bp)
test_bit(BNXT_STATE_READ_STATS, &bp->state));
}
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init)
{
- int rc = 0;
-
-#ifdef CONFIG_BNXT_SRIOV
- if (bp->sriov_cfg) {
- rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
- !bp->sriov_cfg,
- BNXT_SRIOV_CFG_WAIT_TMO);
- if (rc)
- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
- }
-
/* Close the VF-reps before closing PF */
if (BNXT_PF(bp))
bnxt_vf_reps_close(bp);
-#endif
+
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
@@ -6485,6 +6881,22 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_del_napi(bp);
}
bnxt_free_mem(bp, irq_re_init);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ }
+#endif
+ __bnxt_close_nic(bp, irq_re_init, link_re_init);
return rc;
}
@@ -6764,13 +7176,26 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
- netdev_warn(bp->dev,
- "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
- min(max_rss_ctxs - 1, max_vnics - 1));
+ if (bp->rx_nr_rings > 1)
+ netdev_warn(bp->dev,
+ "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+ min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}
- return true;
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ return true;
+
+ if (vnics == bp->hw_resc.resv_vnics)
+ return true;
+
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+ if (vnics <= bp->hw_resc.resv_vnics)
+ return true;
+
+ netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+ return false;
#else
return false;
#endif
@@ -6784,6 +7209,15 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+ if (!(features & NETIF_F_GRO))
+ features &= ~NETIF_F_GRO_HW;
+
+ if (features & NETIF_F_GRO_HW)
+ features &= ~NETIF_F_LRO;
+
/* Both CTAG and STAG VLAN accelaration on the RX side have to be
* turned on or off together.
*/
@@ -6817,9 +7251,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
- if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (features & NETIF_F_GRO_HW)
flags |= BNXT_FLAG_GRO;
- if (features & NETIF_F_LRO)
+ else if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
@@ -7096,7 +7530,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
{
int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed;
- int rc;
+ int rx_rings = rx;
+ int cp, rc;
if (tcs)
tx_sets = tcs;
@@ -7112,7 +7547,10 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;
- return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+ cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -7346,7 +7784,8 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct bnxt *bp = cb_priv;
- if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev))
+ if (!bnxt_tc_flower_enabled(bp) ||
+ !tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -7717,12 +8156,8 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- /* In SRIOV each PF-pool (PF + child VFs) serves as a
- * switching domain, the PF's perm mac-addr can be used
- * as the unique parent-id
- */
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+ attr->u.ppid.id_len = sizeof(bp->switch_id);
+ memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
break;
default:
return -EOPNOTSUPP;
@@ -7800,8 +8235,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
- if (bp->xdp_prog)
- bpf_prog_put(bp->xdp_prog);
bnxt_cleanup_pci(bp);
free_netdev(dev);
}
@@ -7869,24 +8302,14 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp)
{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_ring_grps = 0;
-#ifdef CONFIG_BNXT_SRIOV
- if (!BNXT_PF(bp)) {
- *max_tx = bp->vf.max_tx_rings;
- *max_rx = bp->vf.max_rx_rings;
- *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
- max_ring_grps = bp->vf.max_hw_ring_grps;
- } else
-#endif
- {
- *max_tx = bp->pf.max_tx_rings;
- *max_rx = bp->pf.max_rx_rings;
- *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
- max_ring_grps = bp->pf.max_hw_ring_grps;
- }
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+ *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
*max_cp -= 1;
*max_rx -= 2;
@@ -7922,8 +8345,8 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
if (rc)
return rc;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bnxt_set_ring_params(bp);
}
@@ -7951,6 +8374,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
return rc;
}
+/* In the initial default shared ring setting, each shared ring must have an
+ * RX/TX ring pair.
+ */
+static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
+{
+ bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
+ bp->rx_nr_rings = bp->cp_nr_rings;
+ bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+}
+
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -7966,14 +8400,26 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
+ else
+ bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ rc = __bnxt_reserve_rings(bp);
if (rc)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ /* Rings may have been trimmed, re-reserve the trimmed rings. */
+ if (bnxt_need_reserve_rings(bp)) {
+ rc = __bnxt_reserve_rings(bp);
+ if (rc)
+ netdev_warn(bp->dev, "2nd rings reservation failed.\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ }
bp->num_stat_ctxs = bp->cp_nr_rings;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -7982,11 +8428,23 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
}
-void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
+ int rc;
+
ASSERT_RTNL();
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ return 0;
+
bnxt_hwrm_func_qcaps(bp);
- bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+ __bnxt_close_nic(bp, true, false);
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ dev_close(bp->dev);
+ else
+ rc = bnxt_open_nic(bp, true, false);
+ return rc;
}
static int bnxt_init_mac_addr(struct bnxt *bp)
@@ -8000,7 +8458,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
+ /* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
@@ -8106,7 +8564,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_GRO_HW)
+ dev->features &= ~NETIF_F_LRO;
dev->priv_flags |= IFF_UNICAST_FLT;
#ifdef CONFIG_BNXT_SRIOV
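This hunk advertises hardware GRO (NETIF_F_GRO_HW) on everything except Nitro A0 and keeps it mutually exclusive with LRO in dev->features. A standalone sketch of the flag arithmetic, with illustrative bit values rather than the real NETIF_F_* definitions:

/* Standalone sketch of the GRO_HW/LRO exclusivity applied above; the
 * flag values are illustrative, not the real NETIF_F_* bits.
 */
#include <stdio.h>

#define F_LRO    0x1u
#define F_GRO_HW 0x2u

int main(void)
{
	unsigned int hw_features = F_LRO | F_GRO_HW; /* both supported */
	unsigned int features = hw_features;         /* both requested */

	/* Hardware GRO wins: drop LRO when GRO_HW is enabled. */
	if (features & F_GRO_HW)
		features &= ~F_LRO;
	printf("0x%x\n", features); /* 0x2: GRO_HW only */
	return 0;
}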
@@ -8208,6 +8670,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
+ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5359a1f0045f..1989c470172c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,10 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.8.0"
+#define DRV_MODULE_VERSION "1.9.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 8
+#define DRV_VER_MIN 9
#define DRV_VER_UPD 0
#include <linux/interrupt.h>
@@ -23,6 +23,8 @@
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -607,6 +609,17 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+struct bnxt_coal {
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+ /* RING_IDLE enabled when coal ticks < idle_thresh */
+ u16 idle_thresh;
+ u8 bufs_per_record;
+ u8 budget;
+};
+
struct bnxt_tpa_info {
void *data;
u8 *data_ptr;
@@ -664,12 +677,20 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
+ struct xdp_rxq_info xdp_rxq;
};
struct bnxt_cp_ring_info {
u32 cp_raw_cons;
void __iomem *cp_doorbell;
+ struct bnxt_coal rx_ring_coal;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 event_ctr;
+
+ struct net_dim dim;
+
struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -755,19 +776,38 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
};
-#if defined(CONFIG_BNXT_SRIOV)
-struct bnxt_vf_info {
- u16 fw_fid;
- u8 mac_addr[ETH_ALEN];
+struct bnxt_hw_resc {
+ u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 min_cp_rings;
u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 min_rx_rings;
u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
u16 max_l2_ctxs;
- u16 max_irqs;
+ u16 min_vnics;
u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
u16 max_stat_ctxs;
+ u16 max_irqs;
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN]; /* PF assigned MAC Address */
+ u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only
+ * stored by PF.
+ */
u16 vlan;
u32 flags;
#define BNXT_VF_QOS 0x1
@@ -788,15 +828,6 @@ struct bnxt_pf_info {
u16 fw_fid;
u16 port_id;
u8 mac_addr[ETH_ALEN];
- u16 max_rsscos_ctxs;
- u16 max_cp_rings;
- u16 max_tx_rings; /* HW assigned max tx rings for this PF */
- u16 max_rx_rings; /* HW assigned max rx rings for this PF */
- u16 max_hw_ring_grps;
- u16 max_irqs;
- u16 max_l2_ctxs;
- u16 max_vnics;
- u16 max_stat_ctxs;
u32 first_vf_id;
u16 active_vfs;
u16 max_vfs;
@@ -808,6 +839,9 @@ struct bnxt_pf_info {
u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
+ u8 vf_resv_strategy;
+#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
+#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -944,17 +978,6 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
-struct bnxt_coal {
- u16 coal_ticks;
- u16 coal_ticks_irq;
- u16 coal_bufs;
- u16 coal_bufs_irq;
- /* RING_IDLE enabled when coal ticks < idle_thresh */
- u16 idle_thresh;
- u8 bufs_per_record;
- u8 budget;
-};
-
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1126,6 +1149,8 @@ struct bnxt {
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
+ #define BNXT_FLAG_DIM 0x2000000
+ #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1185,7 +1210,6 @@ struct bnxt {
int tx_nr_rings;
int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
- int tx_reserved_rings;
int tx_wake_thresh;
int tx_push_thresh;
@@ -1297,6 +1321,7 @@ struct bnxt {
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+ struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
@@ -1346,6 +1371,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
+ u8 switch_id[8];
struct bnxt_tc_info *tc_info;
};
@@ -1421,6 +1447,9 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
-void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+void bnxt_dim_work(struct work_struct *work);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fed37cd9ae1d..3c746f2d9ed8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
- data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
- GFP_KERNEL);
+ data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
- memset(data, 0, data_len);
bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
get.dest_data_addr = cpu_to_le64(mapping);
get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
new file mode 100644
index 000000000000..408dd190331e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -0,0 +1,32 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/net_dim.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+void bnxt_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct bnxt_cp_ring_info *cpr = container_of(dim,
+ struct bnxt_cp_ring_info,
+ dim);
+ struct bnxt_napi *bnapi = container_of(cpr,
+ struct bnxt_napi,
+ cp_ring);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
+ cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+
+ bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
+ dim->state = NET_DIM_START_MEASURE;
+}
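bnxt_dim_work() recovers its NAPI context by walking outward through two levels of structure embedding with container_of(), then applies the selected net_dim profile as the new coalescing parameters. A standalone illustration of the chained container_of() pattern, using toy types rather than the driver's structures:

/* Standalone illustration of chained container_of(); toy types only. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct middle { int pad; struct inner in; };
struct outer { int pad; struct middle mid; };

int main(void)
{
	struct outer o = { .mid.in.x = 42 };
	struct inner *ip = &o.mid.in;

	/* Walk outward one embedding level at a time, as the driver
	 * does for net_dim -> bnxt_cp_ring_info -> bnxt_napi.
	 */
	struct middle *mp = container_of(ip, struct middle, in);
	struct outer *op = container_of(mp, struct outer, mid);

	printf("%d\n", op->mid.in.x); /* 42 */
	return 0;
}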
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b13ce5ebde8d..1801582076be 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
+ coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
coal->rx_coalesce_usecs = hw_coal->coal_ticks;
@@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev,
int rc = 0;
u16 mult;
+ if (coal->use_adaptive_rx_coalesce) {
+ bp->flags |= BNXT_FLAG_DIM;
+ } else {
+ if (bp->flags & BNXT_FLAG_DIM) {
+ bp->flags &= ~(BNXT_FLAG_DIM);
+ goto reset_coalesce;
+ }
+ }
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
@@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
update_stats = true;
}
+reset_coalesce:
if (netif_running(dev)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
@@ -1376,6 +1388,9 @@ static int bnxt_firmware_reset(struct net_device *dev,
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
break;
+ case BNXT_FW_RESET_AP:
+ req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ break;
default:
return -EINVAL;
}
@@ -2522,6 +2537,14 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
if (!rc)
netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ } else if (*flags == ETH_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code < 0x10803)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
+ if (!rc)
+ netdev_info(dev, "Reset Application Processor request successful.\n");
} else {
rc = -EINVAL;
}
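The new ETH_RESET_AP branch is reachable from user space through the ETHTOOL_RESET ioctl; newer ethtool binaries should also expose it as `ethtool --reset <dev> ap`. A hedged user-space sketch, assuming the installed <linux/ethtool.h> defines ETH_RESET_AP and using a placeholder interface name:

/* Hedged user-space sketch: request an application-processor reset via
 * the ETHTOOL_RESET ioctl. Assumes <linux/ethtool.h> defines
 * ETH_RESET_AP; "eth0" is a placeholder interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_RESET, .data = ETH_RESET_AP };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ev;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_RESET");
	else
		/* On success, ev.data reports any reset flags NOT completed. */
		printf("remaining flags: 0x%x\n", ev.data);
	close(fd);
	return 0;
}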
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ff601b42fcc8..836ef682f24c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -34,6 +34,7 @@ struct bnxt_led_cfg {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+#define BNXT_FW_RESET_AP 0xfffe
#define BNXT_FW_RESET_CHIP 0xffff
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c99f4d0880e4..82d17f8cc0db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,2437 +1,2700 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
*/
-#ifndef BNXT_HSI_H
-#define BNXT_HSI_H
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
-/* HSI and HWRM Specification 1.8.3 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 3
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
-#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 unused_0;
+ __le16 size;
+ __le64 req_addr;
+};
-#define HWRM_VERSION_STR "1.8.3.1"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-
-/* Statistics Ejection Buffer Completion Record (16 bytes) */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_ENGINE_CKV_HELLO 0x12dUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
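Each HWRM_* request code in the table above is stamped into the common input header before the message is posted; the bnxt_dcb.c hunk earlier shows the driver doing this via bnxt_hwrm_cmd_hdr_init(). A standalone sketch of that stamping, with a toy helper that mirrors struct input above but is not the driver's implementation (endianness conversion omitted):

/* Standalone sketch of stamping a req_type into the common HWRM input
 * header. The layout mirrors struct input above, but the helper is
 * illustrative, not the driver's bnxt_hwrm_cmd_hdr_init(); on the wire
 * these fields are little-endian.
 */
#include <stdint.h>
#include <string.h>

struct hwrm_input_hdr {
	uint16_t req_type;
	uint16_t cmpl_ring;
	uint16_t seq_id;
	uint16_t target_id;
	uint64_t resp_addr;
};

#define HWRM_FUNC_QCAPS 0x15 /* matches the table above */

static void hdr_init(void *req, uint16_t req_type,
		     uint16_t cmpl_ring, uint16_t target_id)
{
	struct hwrm_input_hdr *hdr = req;

	memset(hdr, 0, sizeof(*hdr));
	hdr->req_type = req_type;
	hdr->cmpl_ring = cmpl_ring;  /* 0xffff: no completion ring */
	hdr->target_id = target_id;  /* 0xffff: the firmware itself */
}

int main(void)
{
	struct hwrm_input_hdr req;

	hdr_init(&req, HWRM_FUNC_QCAPS, 0xffff, 0xffff);
	return req.req_type == HWRM_FUNC_QCAPS ? 0 : 1;
}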
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 280
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 0
+#define HWRM_VERSION_STR "1.9.0.0"
+
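The jump from HWRM 1.8.3 to 1.9.0 matters for checks like the `bp->hwrm_spec_code < 0x10803` guard in the bnxt_ethtool.c hunk above: the driver appears to pack (major, minor, update) one byte per component, so 0x10803 reads as spec 1.8.3 and this header packs to 0x10900. A sketch of that assumed packing:

/* Sketch of the assumed HWRM spec-code packing: one byte per version
 * component, so comparisons like "< 0x10803" read as "< 1.8.3".
 */
#include <stdio.h>

static unsigned int spec_code(unsigned int maj, unsigned int min,
			      unsigned int upd)
{
	return (maj << 16) | (min << 8) | upd;
}

int main(void)
{
	printf("0x%x 0x%x\n", spec_code(1, 8, 3), spec_code(1, 9, 0));
	/* prints: 0x10803 0x10900 */
	return 0;
}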
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ u8 reserved2[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ u8 unused_1[5];
+ u8 valid;
+};
+
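hwrm_ver_get_output now carries both the legacy 8-bit *_8b fields and wide 16-bit version fields, with VER_GET_RESP_FLAGS_EXT_VER_AVAIL signalling that the wide fields are valid. A hedged consumer-side sketch of the assumed fallback, using a toy subset of the structure:

/* Toy subset of hwrm_ver_get_output illustrating the assumed fallback
 * from the 16-bit to the legacy 8-bit interface version fields.
 */
#include <stdint.h>
#include <stdio.h>

#define FLAGS_EXT_VER_AVAIL 0x2u /* VER_GET_RESP_FLAGS_EXT_VER_AVAIL */

struct ver_out {
	uint8_t  hwrm_intf_maj_8b, hwrm_intf_min_8b;
	uint8_t  flags;
	uint16_t hwrm_intf_major, hwrm_intf_minor;
};

static void print_intf_ver(const struct ver_out *v)
{
	if (v->flags & FLAGS_EXT_VER_AVAIL)
		printf("%u.%u\n", v->hwrm_intf_major, v->hwrm_intf_minor);
	else
		printf("%u.%u\n", v->hwrm_intf_maj_8b, v->hwrm_intf_min_8b);
}

int main(void)
{
	struct ver_out v = { .flags = FLAGS_EXT_VER_AVAIL,
			     .hwrm_intf_major = 1, .hwrm_intf_minor = 9 };
	print_intf_ver(&v); /* 1.9 */
	return 0;
}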
+/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- __le16 len;
- __le32 opaque;
- __le32 v;
- #define EJECT_CMPL_V 0x1UL
- __le32 unused_2;
-};
-
-/* HWRM Completion Record (16 bytes) */
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* HWRM Forwarded Request (16 bytes) */
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused_0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* HWRM Forwarded Response (16 bytes) */
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
-};
-
-/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
-struct hwrm_async_event_cmpl_link_mtu_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
-struct hwrm_async_event_cmpl_dcb_config_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
-};
-
-/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+};
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
-struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_pf_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
-struct hwrm_async_event_cmpl_vf_flr {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
-struct hwrm_async_event_cmpl_vf_mac_addr_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
-struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
-};
-
-/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
-};
-
-/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_ver_get */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 hwrm_intf_rsvd;
- u8 hwrm_fw_maj;
- u8 hwrm_fw_min;
- u8 hwrm_fw_bld;
- u8 hwrm_fw_rsvd;
- u8 mgmt_fw_maj;
- u8 mgmt_fw_min;
- u8 mgmt_fw_bld;
- u8 mgmt_fw_rsvd;
- u8 netctrl_fw_maj;
- u8 netctrl_fw_min;
- u8 netctrl_fw_bld;
- u8 netctrl_fw_rsvd;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- u8 roce_fw_maj;
- u8 roce_fw_min;
- u8 roce_fw_bld;
- u8 roce_fw_rsvd;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- __le32 reserved2[4];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 init_pending;
- #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_reset */
-/* Input (24 bytes) */
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
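[Editorial note, not part of the commit: the event_data1 bits above are plain flags, so a handler can test them directly after byte-swapping. A minimal sketch of that decode, assuming a hypothetical completion pointer cmpl; the surrounding driver plumbing is omitted.]

	u32 data1 = le32_to_cpu(cmpl->event_data1);
	/* which aspects of the VF config did the PF change? */
	bool mac_changed = data1 & ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE;
	bool mtu_changed = data1 & ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE;
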
+/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_getfid */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
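[Editorial note, not part of the commit: the reset request/response pair above is used like any other HWRM command — zero the request, set req_type, pick a reset level, then poll the valid byte of the response. A hedged sketch; the helper that actually posts the message to the chip is left abstract, and HWRM_FUNC_RESET is the req_type constant defined earlier in this header.]

	struct hwrm_func_reset_input req = {0};

	req.req_type = cpu_to_le16(HWRM_FUNC_RESET);
	/* reset only the issuing function; enables stays 0 so vf_id is ignored */
	req.func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME;
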
+/* hwrm_func_getfid_input (size:192b/24B) */
struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 unused_0[4];
+};
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
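[Editorial note, not part of the commit: the NUM_* enables and *_ASSETS_TEST flags are new in this revision; the flag names suggest a VF can ask firmware to validate a proposed resource reservation rather than silently commit it. A hedged sketch of that reading — only the field and constant names are from the structure above, the semantics are inferred.]

	struct hwrm_func_vf_cfg_input req = {0};

	/* propose a TX ring count and ask firmware to test-allocate it */
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS);
	req.flags = cpu_to_le32(FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST);
	req.num_tx_rings = cpu_to_le16(tx_rings);	/* hypothetical count */
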
+/* hwrm_func_qcaps_input (size:192b/24B) */
struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (80 bytes) */
+/* hwrm_func_qcaps_output (size:640b/80B) */
struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_0;
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg */
-/* Input (48 bytes) */
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ u8 unused_2[7];
+ u8 valid;
+};
+
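[Editorial note, not part of the commit: min_bw/max_bw pack a 28-bit value, a scale bit (bits vs. bytes) and a 3-bit unit into one word, per the masks above. Decoding is mask-and-compare; a sketch, assuming a response pointer resp.]

	u32 bw = le32_to_cpu(resp->max_bw);
	u32 value = bw & FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK;
	/* SCALE bit clear: value counts bits; set: value counts bytes */
	bool in_bytes = bw & FUNC_QCFG_RESP_MAX_BW_SCALE;
	bool mega = (bw & FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK) ==
		    FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA;
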
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_2;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_3;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- __le32 unused_4;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 enables;
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_1;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_2;
+ __be16 ctag_tpid;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 unused_3[4];
+};
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_cfg */
-/* Input (88 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_input (size:704b/88B) */
struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- __le16 mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- u8 unused_2;
- __le16 num_mcast_filters;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 num_mcast_filters;
+};
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};
-/* Output (16 bytes) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qstats */
-/* Input (24 bytes) */
+/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_vnic_ids_query */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0;
- u8 unused_1;
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[2];
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:832b/104B) */
struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- __le16 unused_1;
- __le32 timestamp;
- __le32 unused_2;
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
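[Editorial note, not part of the commit: vf_req_fwd and async_event_fwd are 256-bit bitmaps laid out as eight __le32 words, where bit i selects request type or async event id i for forwarding to the driver. Setting one bit is index/shift arithmetic; a sketch, with event_id any 0..255 id from this header.]

	/* ask firmware to forward async event 'event_id' to this driver */
	req.enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
	req.async_event_fwd[event_id / 32] |=
		cpu_to_le32(1U << (event_id % 32));
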
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- __le32 unused_0;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0;
- u8 unused_1;
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_qver */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- __le16 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_func_drv_qver_output (size:128b/16B) */
struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0[4];
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ u8 unused_0[7];
+ u8 valid;
+};
+
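[Editorial note, not part of the commit: the new resource_qcaps/vf_resource_cfg pair appears to replace fixed per-VF carving with min..max ranges — the PF queries pool-wide limits, assigns each VF a guaranteed minimum and an opportunistic maximum, and firmware reports the reserved_* counts it actually committed. A hedged sketch; caps is a previously fetched hwrm_func_resource_qcaps_output, and the matching req_type constant is assumed to be defined elsewhere in this header.]

	struct hwrm_func_vf_resource_cfg_input req = {0};

	req.vf_id = cpu_to_le16(vf_id);
	req.min_tx_rings = cpu_to_le16(1);	/* guaranteed floor */
	req.max_tx_rings = caps->max_tx_rings;	/* already little-endian */
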
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2;
- u8 unused_3;
- __le32 tx_lpi_timer;
- __le32 unused_4;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2[2];
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le32 unused_3;
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
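
Illustrative only, not part of this patch: the request-side convention these reformatted structures follow is that a forced setting is gated by a bit in `flags`, while optional fields are gated by bits in `enables`. A minimal sketch of forcing a 25 Gbps link, where `struct my_dev` and `hwrm_send()` are hypothetical stand-ins for the driver's device context and HWRM transport (assumed to fill the common header fields req_type/seq_id/resp_addr and wait for completion):

static int force_25g(struct my_dev *bp, u16 port_id)
{
	struct hwrm_port_phy_cfg_input req = {0};

	req.port_id = cpu_to_le16(port_id);
	/* force_link_speed needs no enables bit; it is gated by FLAGS_FORCE */
	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	req.force_link_speed =
		cpu_to_le16(PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB);
	return hwrm_send(bp, &req, sizeof(req));	/* hypothetical helper */
}
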
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- u8 unused_0;
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
- #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- u8 unused_1;
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg */
-/* Input (40 bytes) */
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ u8 unused_2[7];
+ u8 valid;
+};
+
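
The qcfg response packs two logical fields into single bytes, and the MASK/SFT pairs above are how they come apart. A minimal decode sketch, assuming `resp` points at a completed response; note from the defines that link_speed is in units of 100 Mbps (0xa = 1 Gb, 0x64 = 10 Gb), with 0xffff as the special 10 Mb encoding:

static void decode_phy_qcfg(const struct hwrm_port_phy_qcfg_output *resp)
{
	bool link_up = resp->link == PORT_PHY_QCFG_RESP_LINK_LINK;
	u16 speed = le16_to_cpu(resp->link_speed);
	u8 phy_addr = resp->eee_config_phy_addr &
		      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;	/* low 5 bits */
	u8 eee_cfg = (resp->eee_config_phy_addr &
		      PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK) >>
		     PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT;	/* top 3 bits */

	pr_info("link %d, speed %u x100Mbps, phy_addr %u, eee_cfg %#x\n",
		link_up, speed, phy_addr, eee_cfg);
}
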
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+};
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (80 bytes) */
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
- u8 unused_0;
- __le16 unused_1;
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_qstats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
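
hwrm_port_qstats is a DMA-out command: the host passes bus addresses in the request and the firmware writes the counter blocks into them. A sketch of the buffer handshake, reusing the hypothetical `hwrm_send()` from above; the buffer sizes are assumptions standing in for the port-stats structure sizes defined elsewhere in this header:

static int query_port_stats(struct my_dev *bp, struct device *dev,
			    u16 port_id, size_t tx_len, size_t rx_len)
{
	struct hwrm_port_qstats_input req = {0};
	dma_addr_t tx_map, rx_map;
	void *tx_buf, *rx_buf;

	tx_buf = dma_alloc_coherent(dev, tx_len, &tx_map, GFP_KERNEL);
	rx_buf = dma_alloc_coherent(dev, rx_len, &rx_map, GFP_KERNEL);
	if (!tx_buf || !rx_buf)
		return -ENOMEM;	/* error unwind trimmed for brevity */

	req.port_id = cpu_to_le16(port_id);
	req.tx_stat_host_addr = cpu_to_le64(tx_map);
	req.rx_stat_host_addr = cpu_to_le64(rx_map);
	/* on success, tx_stat_size/rx_stat_size in the output report how
	 * many bytes the firmware actually wrote */
	return hwrm_send(bp, &req, sizeof(req));
}
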
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (96 bytes) */
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_error;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (24 bytes) */
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
-};
-
-/* hwrm_port_phy_i2c_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+};
+
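
Unlike most responses here, which end in a standalone `valid` byte, hwrm_port_phy_qcaps_output folds `valid` into the top byte of its final 32-bit word; the VALID_MASK/SFT pair above recovers it. A minimal sketch:

static bool qcaps_valid(const struct hwrm_port_phy_qcaps_output *resp,
			u32 *lpi_timer_high)
{
	u32 w = le32_to_cpu(resp->valid_tx_lpi_timer_high);

	*lpi_timer_high = w & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	return (w & PORT_PHY_QCAPS_RESP_VALID_MASK) >>
	       PORT_PHY_QCAPS_RESP_VALID_SFT;	/* top 8 bits == valid */
}
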
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 unused_0;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* Output (80 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 unused_0;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_cfg */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
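
This command is what backs SFP/QSFP module EEPROM dumps: the output's `data[16]` gives a 64-byte window per call, so larger reads loop over `page_offset`. A sketch under the assumption that 0xa0 (the conventional SFF-8472 A0h page address) is the module EEPROM target:

static int read_module_eeprom(struct my_dev *bp, u16 port_id,
			      u16 offset, u8 len)
{
	struct hwrm_port_phy_i2c_read_input req = {0};

	req.port_id = cpu_to_le16(port_id);
	req.i2c_slave_addr = 0xa0;		/* assumption, see above */
	req.enables = cpu_to_le32(PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
	req.page_offset = cpu_to_le16(offset);
	req.data_length = min_t(u8, len, 64);	/* data[16] holds 64 bytes */
	return hwrm_send(bp, &req, sizeof(req));
}
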
+/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
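
The request above is consumed selectively: firmware only reads the led*_ fields whose bit is set in the enables bitmap. A minimal user-space sketch of composing a "blink LED 0" request follows; it mirrors just the fields it touches with host-endian stdint types (the kernel driver would use the real struct with __le16/__le32 and cpu_to_le*() conversions), and the 500 ms on/off durations are illustrative — treating the blink fields as milliseconds is an assumption here, not something this header states.

#include <stdint.h>
#include <string.h>

#define PORT_LED_CFG_REQ_ENABLES_LED0_ID        0x1U
#define PORT_LED_CFG_REQ_ENABLES_LED0_STATE     0x2U
#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON  0x8U
#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10U
#define PORT_LED_CFG_REQ_LED0_STATE_BLINK       0x3U

/* Host-endian mirror of only the fields this sketch fills. */
struct led_cfg_req {
	uint32_t enables;
	uint16_t port_id;
	uint8_t  num_leds;
	uint8_t  led0_id;
	uint8_t  led0_state;
	uint16_t led0_blink_on;
	uint16_t led0_blink_off;
};

/* Fill a request that blinks LED 0 on the given port. */
static void led0_blink(struct led_cfg_req *req, uint16_t port)
{
	memset(req, 0, sizeof(*req));
	req->port_id        = port;
	req->num_leds       = 1;
	req->led0_id        = 0;
	req->led0_state     = PORT_LED_CFG_REQ_LED0_STATE_BLINK;
	req->led0_blink_on  = 500;	/* assumed: duration in ms */
	req->led0_blink_off = 500;
	req->enables = PORT_LED_CFG_REQ_ENABLES_LED0_ID |
		       PORT_LED_CFG_REQ_ENABLES_LED0_STATE |
		       PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |
		       PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF;
}
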
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused_0[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- u8 led0_group_id;
- u8 unused_1;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- u8 led1_group_id;
- u8 unused_2;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- u8 led2_group_id;
- u8 unused_3;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- u8 led3_group_id;
- u8 unused_4;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_4[3];
+ u8 valid;
+};
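
Before issuing a port_led_cfg, a consumer can gate on the per-LED capability words above: an LED is usable only when the ENABLED bit of its state_caps word is set, and each requested state should be cross-checked against the matching *_SUPPORTED bit. A minimal host-endian sketch (kernel code would le16_to_cpu() the caps word first):

#include <stdint.h>
#include <stdbool.h>

#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED         0x1U
#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8U

/* Can LED0 be driven in plain blink mode? */
static bool led0_can_blink(uint16_t led0_state_caps)
{
	return (led0_state_caps & PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED) &&
	       (led0_state_caps & PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED);
}
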
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- __le16 unused_0;
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 unused_0[2];
+};
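
PATH here is a one-bit enumerated field inside flags: 0x0 selects the TX direction, 0x1 the RX direction, with the *_LAST define marking the highest legal value. A small sketch of selecting the RX view without disturbing other flag bits:

#include <stdint.h>

#define QUEUE_QPORTCFG_REQ_FLAGS_PATH    0x1U
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1U

/* Clear the direction field, then set it to RX. */
static uint32_t qportcfg_select_rx(uint32_t flags)
{
	return (flags & ~QUEUE_QPORTCFG_REQ_FLAGS_PATH) |
	       QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX;
}
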
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 valid;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 valid;
};
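
The response above packs up to eight CoS queues as consecutive {queue_id, service_profile} byte pairs, with max_configurable_queues bounding how many are meaningful. Assuming that contiguous layout holds (as the field order suggests), a consumer can walk the pairs as an array instead of naming each field; a host-endian sketch:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1U

/* 'queue_id0' points at the first of eight consecutive {id, profile}
 * byte pairs in the response; walk them generically. */
static void list_lossless_queues(const uint8_t *queue_id0, uint8_t max_queues)
{
	const uint8_t *p = queue_id0;

	for (uint8_t i = 0; i < max_queues && i < 8; i++, p += 2) {
		uint8_t id = p[0], profile = p[1];

		if (profile == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
			printf("queue %u is lossless\n", id);
	}
}
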
-/* hwrm_queue_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_cfg_input (size:320b/40B) */
struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
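
The flags word above is a straight per-priority bitmap: bit N set means PFC is enabled for 802.1p priority N, as the PRI0..PRI7 defines (0x1 through 0x80) spell out. A host-endian test sketch (kernel code would le32_to_cpu() the word first):

#include <stdint.h>
#include <stdbool.h>

#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1U

/* PFC state for priority 'pri' (0..7): the eight flags are contiguous bits. */
static bool pfc_enabled(uint32_t flags, unsigned int pri)
{
	return pri < 8 &&
	       (flags & (QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED << pri));
}
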
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
@@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input {
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- __le16 port_id;
- __le16 unused_0;
+ __le16 port_id;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
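
The mapping in the response above comes back as eight consecutive bytes, pri0_cos_queue_id through pri7_cos_queue_id, so priority N's CoS queue is simply the Nth byte. A sketch under that contiguity assumption:

#include <stdint.h>
#include <stdio.h>

/* Dump the priority -> CoS-queue mapping; 'pri0' points at
 * pri0_cos_queue_id, the first of eight consecutive mapping bytes. */
static void dump_pri2cos(const uint8_t *pri0)
{
	for (unsigned int pri = 0; pri < 8; pri++)
		printf("priority %u -> CoS queue %u\n", pri, pri0[pri]);
}
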
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
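
The cos2bw response that follows packs each min/max bandwidth into a single 32-bit word: bits 0-27 carry the magnitude, bit 28 selects bits-versus-bytes scaling, and bits 29-31 select the unit (0 mega, 1 percent/100, 2 kilo, 4 base, 6 giga, 7 invalid). A decode sketch built from those masks and shifts, with the long QUEUE_COS2BW_QCFG_RESP_* constants abbreviated to local names and host-endian input assumed (the kernel would le32_to_cpu() first):

#include <stdint.h>

#define BW_VALUE_MASK  0xfffffffU	/* bits 0..27 */
#define BW_SCALE_BYTES 0x10000000U	/* bit 28: 0 = bits, 1 = bytes */
#define BW_UNIT_MASK   0xe0000000U	/* bits 29..31 */
#define BW_UNIT_SFT    29

struct bw_decoded {
	uint32_t value;	/* raw 28-bit magnitude */
	int      bytes;	/* nonzero when scaled in bytes rather than bits */
	uint32_t unit;	/* 0=mega, 1=percent/100, 2=kilo, 4=base, 6=giga, 7=invalid */
};

static struct bw_decoded bw_decode(uint32_t bw)
{
	struct bw_decoded d = {
		.value = bw & BW_VALUE_MASK,
		.bytes = !!(bw & BW_SCALE_BYTES),
		.unit  = (bw & BW_UNIT_MASK) >> BW_UNIT_SFT,
	};
	return d;
}
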
-/* Output (112 bytes) */
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg */
-/* Input (128 bytes) */
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2[4];
+ u8 valid;
+};
+
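The queue_idN_min_bw/max_bw words above all share one 32-bit layout: bits 0-27 carry the bandwidth value, bit 28 selects bits- vs. bytes-based scaling, and bits 29-31 select the unit (mega, kilo, base, giga, or 1/100 percent). A minimal sketch of packing such a word from the queue-0 request macros defined below, assuming this header plus the usual kernel types (<linux/types.h>, cpu_to_le32) are available; the helper name bnxt_encode_bw() is hypothetical and not part of the patch:

	/* Illustrative only: pack value, scale and unit into one bw word. */
	static __le32 bnxt_encode_bw(u32 bw_value, bool scale_bytes, u32 unit_bits)
	{
		/* bits 0..27: bandwidth value */
		u32 val = bw_value & QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;

		/* bit 28: 0 = scale in bits, 1 = scale in bytes */
		if (scale_bytes)
			val |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES;

		/* bits 29..31: unit, passed as one of the *_BW_VALUE_UNIT_* macros */
		val |= unit_bits & QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK;

		return cpu_to_le32(val);
	}

	/* e.g. a 100 Mbit/s minimum for queue 0: */
	/* req->queue_id0_min_bw = bnxt_encode_bw(100, false,
	 *		QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA); */

The same layout applies to every queue_idN field in both the qcfg response above and the cfg request below; only the macro prefixes differ.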
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_1[5];
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
};
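
Every queue_idN_min_bw/max_bw word above shares one packed layout: bits 0-27 carry the bandwidth value, bit 28 selects bit- versus byte-based scaling, and bits 29-31 select the unit. A minimal userspace sketch of composing such a word (constant values copied from the definitions above; the helper name is illustrative, not part of the header):

#include <stdint.h>
#include <stdio.h>

#define BW_VALUE_MASK   0x0fffffffu   /* bits 0-27: bandwidth value */
#define BW_SCALE_BITS   (0x0u << 28)  /* bit 28: scale in bits */
#define BW_UNIT_MEGA    (0x0u << 29)  /* bits 29-31: unit selector */

/* Illustrative helper: encode a rate in Mbit/s into a min_bw/max_bw word. */
static uint32_t encode_bw_mbps(uint32_t mbps)
{
	return (mbps & BW_VALUE_MASK) | BW_SCALE_BITS | BW_UNIT_MEGA;
}

int main(void)
{
	printf("10 Gbit/s -> 0x%08x\n", (unsigned int)encode_bw_mbps(10000));
	return 0;
}
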
-/* Output (16 bytes) */
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
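
Note the reworked output layout: the trailing pad now places `valid` in the final byte of every response struct, which is the byte a driver polls to detect that firmware has finished the DMA write. A hedged sketch of that check (assuming the response buffer and its byte length are already known to the caller):

#include <stddef.h>
#include <stdint.h>

/* A response is usable once firmware writes a non-zero 'valid' byte;
 * with the new layouts that is always the last byte of the struct. */
static int hwrm_resp_ready(const volatile uint8_t *resp, size_t len)
{
	return len > 0 && resp[len - 1] != 0;
}
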
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
};
-/* Output (16 bytes) */
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
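
The qcaps response is self-describing: `num_dscp_bits` bounds how many distinct DSCP codepoints the device can distinguish, and `max_entries` bounds the mapping table. A small sketch of deriving the codepoint count under that reading (the helper name is illustrative):

#include <stdint.h>

/* e.g. num_dscp_bits == 6 yields the full 64 DSCP codepoints */
static uint32_t dscp_codepoints(uint8_t num_dscp_bits)
{
	return 1u << num_dscp_bits;
}
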
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
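
Two separate knobs govern the default priority in this request: FLAGS_USE_HW_DEFAULT_PRI asks for the hardware default, while ENABLES_DEFAULT_PRI marks the explicit `default_pri` field as valid. A hedged sketch of keeping them consistent (constants copied from above; treating the two as alternatives is this sketch's assumption):

#include <stdint.h>

#define FLAGS_USE_HW_DEFAULT_PRI 0x1u
#define ENABLES_DEFAULT_PRI      0x1u

/* Either request the hardware default or supply an explicit priority;
 * this sketch treats the two as mutually exclusive. */
static void set_default_pri(uint32_t *flags, uint32_t *enables,
			    uint8_t *default_pri, int use_hw, uint8_t pri)
{
	if (use_hw) {
		*flags |= FLAGS_USE_HW_DEFAULT_PRI;
	} else {
		*enables |= ENABLES_DEFAULT_PRI;
		*default_pri = pri;
	}
}
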
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_alloc_output (size:128b/16B) */
struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_vnic_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_free_output (size:128b/16B) */
struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
};
-/* Output (24 bytes) */
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- u8 unused_0;
- u8 unused_1;
- __le32 max_agg_timer;
- __le32 min_agg_len;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
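
The MAX_AGG_SEGS_1/_2/_4/_8 names suggest a log2 encoding of the TPA segment count (0x0 for 1 segment up to 0x3 for 8, bounded by _MAX). A sketch under that assumption (the helper is illustrative, not part of the header):

#include <stdint.h>

#define MAX_AGG_SEGS_MAX 0x1fu

/* Assumed log2 encoding: 1 -> 0x0, 2 -> 0x1, 4 -> 0x2, 8 -> 0x3, ... */
static uint16_t agg_segs_to_code(unsigned int segs)
{
	uint16_t code = 0;

	while ((1u << code) < segs && code < MAX_AGG_SEGS_MAX)
		code++;
	return code;
}
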
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- __le32 unused_0;
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- __le16 unused_1[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ u8 unused_0[4];
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 unused_1[6];
+};
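
`hash_type` is a plain OR-mask of the tuple types fed into the RSS hash. For example, hashing both address-only and TCP 4-tuple traffic for IPv4 and IPv6 (constant values copied from the definitions above):

#include <stdint.h>

#define HASH_TYPE_IPV4     0x1u
#define HASH_TYPE_TCP_IPV4 0x2u
#define HASH_TYPE_IPV6     0x8u
#define HASH_TYPE_TCP_IPV6 0x10u

/* a typical choice: spread IPv4/IPv6 and their TCP flows across rings */
static const uint32_t default_hash_type =
	HASH_TYPE_IPV4 | HASH_TYPE_TCP_IPV4 |
	HASH_TYPE_IPV6 | HASH_TYPE_TCP_IPV6;
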
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_alloc */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 unused_1;
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- u8 unused_2;
- u8 unused_3;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- u8 unused_4;
- u8 unused_5;
- __le32 reserved1;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- u8 unused_6;
- u8 unused_7;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0[3];
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_1[2];
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_2[2];
+ __le32 reserved1;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 unused_3;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- u8 unused_8[3];
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 unused_4[3];
};
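
`ring_arb_cfg` packs two subfields into one __le16: the arbitration policy in the low nibble and an 8-bit policy parameter (e.g. a weight when the policy is WFQ) in bits 8-15. A sketch of composing it (constants copied from above; the helper name is illustrative):

#include <stdint.h>

#define ARB_POLICY_MASK      0xfu
#define ARB_POLICY_WFQ       0x2u
#define ARB_POLICY_PARAM_SFT 8

/* e.g. make_ring_arb_cfg(ARB_POLICY_WFQ, weight) */
static uint16_t make_ring_arb_cfg(uint8_t policy, uint8_t param)
{
	return (policy & ARB_POLICY_MASK) |
	       ((uint16_t)param << ARB_POLICY_PARAM_SFT);
}
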
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_free_input (size:192b/24B) */
struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[6];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_grp_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_ring_grp_free_output (size:128b/16B) */
struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_alloc */
-/* Input (96 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- u8 l2_addr[6];
- u8 unused_0;
- u8 unused_1;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_2;
- u8 unused_3;
- u8 t_l2_addr[6];
- u8 unused_4;
- u8 unused_5;
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- u8 unused_6;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_7;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- u8 unused_8;
- __le32 unused_9;
- __le64 l2_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0[2];
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
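
Allocating a basic receive-side unicast MAC filter needs only a handful of the enables bits: the MAC itself, optionally its mask, and the destination id, with the RX path selected in flags. A hedged sketch of the bit composition (constants copied from above; the helper is illustrative):

#include <stdint.h>

#define FLAGS_PATH_RX        0x1u
#define ENABLES_L2_ADDR      0x1u
#define ENABLES_L2_ADDR_MASK 0x2u
#define ENABLES_DST_ID       0x8000u

/* illustrative: flags/enables for a simple RX unicast MAC filter */
static void basic_l2_filter_bits(uint32_t *flags, uint32_t *enables)
{
	*flags   = FLAGS_PATH_RX;
	*enables = ENABLES_L2_ADDR | ENABLES_L2_ADDR_MASK | ENABLES_DST_ID;
}
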
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- __le32 unused_0;
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
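
The rx mask is likewise a bit-OR of receive classes; note that the diff drops the old RESERVED bit (0x1UL) from the new definition. A sketch of two common settings (constants copied from above; the helper is illustrative):

#include <stdint.h>

#define MASK_MCAST       0x2u
#define MASK_ALL_MCAST   0x4u
#define MASK_BCAST       0x8u
#define MASK_PROMISCUOUS 0x10u

/* illustrative: normal operation vs. promiscuous capture */
static uint32_t rx_mask(int promisc)
{
	uint32_t mask = MASK_MCAST | MASK_BCAST;

	if (promisc)
		mask |= MASK_PROMISCUOUS | MASK_ALL_MCAST;
	return mask;
}
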
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ u8 unused_0[7];
};
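
/*
 * Illustrative sketch (editor's example, not part of the generated header):
 * filling a set_rx_mask request for broadcast, all-multicast and promiscuous
 * receive on one VNIC.  The helper name is hypothetical, and the transport
 * that actually sends the message to firmware is out of scope here.
 */
static void bnxt_fill_rx_mask_example(struct hwrm_cfa_l2_set_rx_mask_input *req,
                                      u32 vnic_id)
{
        req->vnic_id = cpu_to_le32(vnic_id);
        req->mask = cpu_to_le32(CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
                                CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
                                CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS);
        /* mc_tbl_addr/num_mc_entries matter only when MASK_MCAST is set */
}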
-/* hwrm_cfa_tunnel_filter_alloc */
-/* Input (88 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
};
-/* Output (24 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+};
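
/*
 * Illustrative sketch: the 40-byte l3[10] area above carries either a
 * hwrm_vxlan_ipv4_hdr (16B) or a hwrm_vxlan_ipv6_hdr (40B); the two are
 * distinguished by the IP version nibble (L3_VER_IPV4/L3_VER_IPV6).  A
 * hypothetical IPv4 fill, assuming the request was zeroed beforehand:
 */
static void vxlan_encap_set_ipv4_example(struct hwrm_cfa_encap_data_vxlan *encap,
                                         __be32 saddr, __be32 daddr)
{
        struct hwrm_vxlan_ipv4_hdr *ip4 = (void *)encap->l3;

        ip4->ver_hlen = (4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) |
                        5;              /* 20-byte header in 32-bit words */
        ip4->ttl = 64;
        ip4->protocol = 17;             /* UDP carries the VXLAN payload */
        ip4->src_ip_addr = saddr;
        ip4->dest_ip_addr = daddr;
}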
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- u8 unused_0;
- __le16 unused_1;
- __le32 encap_data[20];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ u8 unused_0[7];
};
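
/*
 * Illustrative sketch: a minimal IPv4/TCP n-tuple filter steering one
 * destination port to a given dst_id.  Each keyed field needs its ENABLES
 * bit set or firmware ignores it.  Hypothetical example only, not the
 * driver's actual filter-building code.
 */
static void fill_ntuple_tcp_dport_example(struct hwrm_cfa_ntuple_filter_alloc_input *req,
                                          __le64 l2_filter_id, u16 dst_id,
                                          __be16 dport)
{
        req->l2_filter_id = l2_filter_id;
        req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
        req->ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
        req->dst_port = dport;
        req->dst_port_mask = cpu_to_be16(0xffff);       /* exact match */
        req->dst_id = cpu_to_le16(dst_id);
        req->enables =
                cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |
                            CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID);
}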
-/* hwrm_cfa_ntuple_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 unused_0;
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ u8 unused_0[4];
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- __le16 unused_1[3];
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc */
-/* Input (104 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2;
- u8 unused_3;
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- u8 unused_4;
- u8 unused_5;
- u8 unused_6[3];
- u8 unused_7;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
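
/*
 * Illustrative sketch: a decap filter matching one VXLAN tunnel id and
 * directing decapsulated traffic to dst_id.  Hypothetical example only;
 * the OVS_TUNNEL flag and the remaining keys are left at their defaults.
 */
static void fill_vxlan_decap_example(struct hwrm_cfa_decap_filter_alloc_input *req,
                                     __be32 tunnel_id, u16 dst_id)
{
        req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        req->tunnel_id = tunnel_id;
        req->dst_id = cpu_to_le16(dst_id);
        req->enables =
                cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
                            CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID |
                            CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID);
}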
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 unused_0;
+};
+
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[5];
+ u8 valid;
+};
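
/*
 * Illustrative sketch: the 16-bit flags word in the flow_alloc request is a
 * packed field; multi-bit subfields are built from the pre-shifted value
 * macros and bounded by the matching *_MASK/*_SFT macros.  For a
 * single-tagged IPv4 flow:
 */
static __le16 flow_alloc_flags_ipv4_one_vlan_example(void)
{
        u16 flags = 0;

        flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;         /* 0x1 << 1 */
        flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4;        /* 0x1 << 3 */
        return cpu_to_le16(flags);
}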
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- __le16 unused_0;
-};
-
-/* Output (176 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ u8 unused_0[7];
+ u8 valid;
+};
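
/*
 * Illustrative sketch: flow stats are queried in batches of up to ten
 * handles; packet_N/byte_N in the response line up positionally with
 * flow_handle_N in the request.  A hypothetical request fill, relying on
 * flow_handle_0..flow_handle_9 being consecutive __le16 fields:
 */
static void fill_flow_stats_req_example(struct hwrm_cfa_flow_stats_input *req,
                                        const __le16 *handles, int n)
{
        __le16 *slot = &req->flow_handle_0;
        int i;

        req->num_flows = cpu_to_le16(n);        /* n <= 10 */
        for (i = 0; i < n; i++)
                slot[i] = handles[i];
}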
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- __le32 unused_0;
- char vfr_name[32];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0[7];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __be16 tunnel_dst_port_val;
- __be32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ u8 unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[5];
+ u8 valid;
+};
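
/*
 * Illustrative sketch: advertising the standard VXLAN UDP port to the NIC.
 * tunnel_dst_port_val is big-endian on the wire; the tunnel_dst_port_id
 * returned in the response is the handle later passed to the *_free call.
 */
static void fill_vxlan_port_alloc_example(struct hwrm_tunnel_dst_port_alloc_input *req)
{
        req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        req->tunnel_dst_port_val = cpu_to_be16(4789);   /* IANA VXLAN port */
}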
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __le16 tunnel_dst_port_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
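
/*
 * Illustrative sketch: stats_dma_addr is expected to point at a
 * ctx_hw_stats-sized DMA-coherent buffer that firmware then updates every
 * update_period_ms.  Allocating the buffer itself (dma_alloc_coherent())
 * is elided; the helper name is hypothetical.
 */
static void fill_stat_ctx_alloc_example(struct hwrm_stat_ctx_alloc_input *req,
                                        dma_addr_t stats_map, u32 period_ms)
{
        req->stats_dma_addr = cpu_to_le64(stats_map);
        req->update_period_ms = cpu_to_le32(period_ms);
        req->stat_ctx_flags = 0;        /* or STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE */
}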
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (176 bytes) */
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 host_idx;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+ u8 host_idx;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_qstatus */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
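
/*
 * Illustrative sketch: requesting a reset of one embedded processor.
 * The response's selfrst_status reports whether the chip will self-reset
 * immediately, on the next PCIe reset, or not at all; whether firmware
 * honors selfrst_status in the request is an assumption here.
 */
static void fill_fw_reset_example(struct hwrm_fw_reset_input *req)
{
        req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
        req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
}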
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_set_time */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ __le16 len;
+ u8 version;
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
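
/*
 * Illustrative sketch: structured data is passed to firmware as
 * hwrm_struct_hdr records, each followed by its payload; next_offset
 * chains to the next header (STRUCT_HDR_NEXT_OFFSET_LAST terminates) and
 * hdr_cnt in the set_structured_data request counts the headers.  One
 * DCBX APP entry, with len taken as the payload length (an assumption):
 */
struct dcbx_app_buf_example {
        struct hwrm_struct_hdr          hdr;
        struct hwrm_struct_data_dcbx_app app;
};

static void fill_dcbx_app_example(struct dcbx_app_buf_example *buf,
                                  __be16 proto, u8 prio)
{
        buf->hdr.struct_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
        buf->hdr.len = cpu_to_le16(sizeof(buf->app));
        buf->hdr.count = 1;
        buf->hdr.next_offset = cpu_to_le16(STRUCT_HDR_NEXT_OFFSET_LAST);
        buf->app.protocol_id = proto;
        buf->app.protocol_selector =
                STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE;
        buf->app.priority = prio;
        buf->app.valid = 1;
}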
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
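A note on the layout the structures above imply: hwrm_fw_set_structured_data_input points src_data_addr at a host buffer holding hdr_cnt elements, each a hwrm_struct_hdr immediately followed by its payload, with next_offset set to STRUCT_HDR_NEXT_OFFSET_LAST (0) on the final element. A minimal sketch of building a single DCBX-app element under those assumptions (buffer allocation, DMA mapping, and the actual send are left to the driver's usual HWRM helpers; the function name is hypothetical):

/* Hedged sketch: lay out one DCBX-app element behind its struct header.
 * "buf" is assumed to be a coherent DMA buffer of at least
 * sizeof(struct hwrm_struct_hdr) + sizeof(struct hwrm_struct_data_dcbx_app).
 */
static void sketch_fill_dcbx_app(void *buf, __be16 proto, u8 prio)
{
	struct hwrm_struct_hdr *hdr = buf;
	struct hwrm_struct_data_dcbx_app *app = (void *)(hdr + 1);

	hdr->struct_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
	hdr->len = cpu_to_le16(sizeof(*app));
	hdr->version = 0;
	hdr->count = 1;				/* one app entry follows */
	hdr->subtype = cpu_to_le16(0);
	hdr->next_offset = cpu_to_le16(STRUCT_HDR_NEXT_OFFSET_LAST);

	app->protocol_id = proto;		/* already big-endian on the wire */
	app->protocol_selector = STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE;
	app->priority = prio;
	app->valid = 1;
}
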
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_fw_get_structured_data */
-/* Input (32 bytes) */
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- u8 count;
- u8 unused_0;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+ u8 count;
+ u8 unused_0;
};
-/* Output (16 bytes) */
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_exec_fwd_resp */
-/* Input (128 bytes) */
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_resp */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le32 encap_async_event_cmpl[4];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_temp_monitor_query_output (size:128b/16B) */
struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_alloc */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- u8 unused_0;
- __le32 unused_1;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- __le32 unused_2;
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_free */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
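The enables bitmap above gates which optional fields firmware reads; a magic-packet filter needs only the MAC address, while the pattern_* fields appear to apply to the BMP (bitmap pattern) type. A sketch of the common magic-packet request, assuming the request header has already been initialized by the usual bnxt_hwrm_cmd_hdr_init() path:

/* Hedged sketch: request a magic-packet WoL filter on one port. */
static void sketch_wol_magicpkt_req(struct hwrm_wol_filter_alloc_input *req,
				    u16 port_id, const u8 *mac)
{
	req->port_id = cpu_to_le16(port_id);
	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
	memcpy(req->mac_address, mac, ETH_ALEN);
}

On success, wol_filter_id from the output structure is what a later hwrm_wol_filter_free request would name.
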
+/* hwrm_wol_filter_free_input (size:256b/32B) */
struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
};
-/* Output (16 bytes) */
+/* hwrm_wol_filter_free_output (size:128b/16B) */
struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- __le32 unused_0;
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3[3];
- u8 unused_4;
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- __le16 unused_5[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- __le16 unused_4[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- u8 wol_pkt_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct */
-/* Input (32 bytes) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* Output (16 bytes) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
};
-/* Output (16 bytes) */
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (24 bytes) */
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_write */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:384b/48B) */
struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- __le32 dir_item_length;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ __le32 dir_item_length;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
};
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
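These *_cmd_err code values surface in the cmd_err byte of the generic error response, so a failed HWRM_NVM_WRITE can be given a more specific errno than a blanket -EIO. A hedged sketch of one plausible mapping (the helper name and the exact errno choices are hypothetical; the hwrm_err_output cast mirrors how command-specific errors are commonly read back):

/* Sketch: translate an NVM write command-specific error to an errno. */
static int sketch_nvm_write_err(struct hwrm_err_output *resp)
{
	switch (resp->cmd_err) {
	case NVM_WRITE_CMD_ERR_CODE_FRAG_ERR:
		return -EINVAL;		/* fragmented NVM update rejected */
	case NVM_WRITE_CMD_ERR_CODE_NO_SPACE:
		return -ENOSPC;		/* NVRAM directory has no free space */
	default:
		return -EIO;
	}
}
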
-/* hwrm_nvm_modify */
-/* Input (40 bytes) */
+/* hwrm_nvm_modify_input (size:320b/40B) */
struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- u8 unused_1[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (32 bytes) */
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_install_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- __le16 unused_0;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
-/* hwrm_nvm_get_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ u8 unused_0[7];
};
-/* hwrm_nvm_set_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ u8 unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ u8 unused_0[7];
};
-/* hwrm_selftest_qlist */
-/* Input (16 bytes) */
+/* hwrm_selftest_qlist_input (size:128b/16B) */
struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (280 bytes) */
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1;
- u8 unused_2;
- char test0_name[32];
- char test1_name[32];
- char test2_name[32];
- char test3_name[32];
- char test4_name[32];
- char test5_name[32];
- char test6_name[32];
- char test7_name[32];
- __le32 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 unused_6;
- u8 valid;
-};
-
-/* hwrm_selftest_exec */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test0_name[32];
+ char test1_name[32];
+ char test2_name[32];
+ char test3_name[32];
+ char test4_name[32];
+ char test5_name[32];
+ char test6_name[32];
+ char test7_name[32];
+ u8 unused_2[7];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 pcie_lane_num;
+ u8 unused_0[6];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_selftest_irq */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
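One detail worth calling out in the exec pair above: the same bit position names a test in the request flags, in requested_tests, and in test_success, so outcome checking is pure mask arithmetic. A sketch:

/* Hedged sketch: report which requested selftests failed.
 * A bit set in requested_tests but clear in test_success failed.
 */
static u8 sketch_failed_tests(struct hwrm_selftest_exec_output *resp)
{
	return resp->requested_tests & ~resp->test_success;
}

Any nonzero return identifies the failing tests by the same SELFTEST_* bit names used in the request.
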
+/* hwrm_selftest_irq_input (size:128b/16B) */
struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_selftest_irq_output (size:128b/16B) */
struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_selftest_retrieve_serdes_data */
-/* Input (32 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 resp_data_addr;
- __le32 resp_data_offset;
- __le16 data_len;
- u8 flags;
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 total_data_len;
- __le16 copied_data_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Hardware Resource Manager Specification */
-/* Input (16 bytes) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- __le16 unused_0;
- __le16 size;
- __le64 req_addr;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET (0x0UL)
- #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
- #define HWRM_FUNC_VF_CFG (0xfUL)
- #define RESERVED1 (0x10UL)
- #define HWRM_FUNC_RESET (0x11UL)
- #define HWRM_FUNC_GETFID (0x12UL)
- #define HWRM_FUNC_VF_ALLOC (0x13UL)
- #define HWRM_FUNC_VF_FREE (0x14UL)
- #define HWRM_FUNC_QCAPS (0x15UL)
- #define HWRM_FUNC_QCFG (0x16UL)
- #define HWRM_FUNC_CFG (0x17UL)
- #define HWRM_FUNC_QSTATS (0x18UL)
- #define HWRM_FUNC_CLR_STATS (0x19UL)
- #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
- #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
- #define HWRM_FUNC_DRV_RGTR (0x1dUL)
- #define HWRM_FUNC_DRV_QVER (0x1eUL)
- #define HWRM_FUNC_BUF_RGTR (0x1fUL)
- #define HWRM_PORT_PHY_CFG (0x20UL)
- #define HWRM_PORT_MAC_CFG (0x21UL)
- #define HWRM_PORT_TS_QUERY (0x22UL)
- #define HWRM_PORT_QSTATS (0x23UL)
- #define HWRM_PORT_LPBK_QSTATS (0x24UL)
- #define HWRM_PORT_CLR_STATS (0x25UL)
- #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
- #define HWRM_PORT_PHY_QCFG (0x27UL)
- #define HWRM_PORT_MAC_QCFG (0x28UL)
- #define HWRM_PORT_MAC_PTP_QCFG (0x29UL)
- #define HWRM_PORT_PHY_QCAPS (0x2aUL)
- #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
- #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
- #define HWRM_PORT_LED_CFG (0x2dUL)
- #define HWRM_PORT_LED_QCFG (0x2eUL)
- #define HWRM_PORT_LED_QCAPS (0x2fUL)
- #define HWRM_QUEUE_QPORTCFG (0x30UL)
- #define HWRM_QUEUE_QCFG (0x31UL)
- #define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_FUNC_VLAN_CFG (0x33UL)
- #define HWRM_FUNC_VLAN_QCFG (0x34UL)
- #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
- #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
- #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
- #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
- #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
- #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
- #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL)
- #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL)
- #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL)
- #define HWRM_VNIC_ALLOC (0x40UL)
- #define HWRM_VNIC_FREE (0x41UL)
- #define HWRM_VNIC_CFG (0x42UL)
- #define HWRM_VNIC_QCFG (0x43UL)
- #define HWRM_VNIC_TPA_CFG (0x44UL)
- #define HWRM_VNIC_TPA_QCFG (0x45UL)
- #define HWRM_VNIC_RSS_CFG (0x46UL)
- #define HWRM_VNIC_RSS_QCFG (0x47UL)
- #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
- #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
- #define HWRM_VNIC_QCAPS (0x4aUL)
- #define HWRM_RING_ALLOC (0x50UL)
- #define HWRM_RING_FREE (0x51UL)
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
- #define HWRM_RING_RESET (0x5eUL)
- #define HWRM_RING_GRP_ALLOC (0x60UL)
- #define HWRM_RING_GRP_FREE (0x61UL)
- #define RESERVED5 (0x64UL)
- #define RESERVED6 (0x65UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
- #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
- #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
- #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
- #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL)
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
- #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
- #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
- #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
- #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
- #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
- #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
- #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
- #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
- #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
- #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
- #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
- #define HWRM_STAT_CTX_ALLOC (0xb0UL)
- #define HWRM_STAT_CTX_FREE (0xb1UL)
- #define HWRM_STAT_CTX_QUERY (0xb2UL)
- #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
- #define HWRM_FW_RESET (0xc0UL)
- #define HWRM_FW_QSTATUS (0xc1UL)
- #define HWRM_FW_SET_TIME (0xc8UL)
- #define HWRM_FW_GET_TIME (0xc9UL)
- #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL)
- #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL)
- #define HWRM_FW_IPC_MAILBOX (0xccUL)
- #define HWRM_EXEC_FWD_RESP (0xd0UL)
- #define HWRM_REJECT_FWD_RESP (0xd1UL)
- #define HWRM_FWD_RESP (0xd2UL)
- #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
- #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
- #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
- #define HWRM_WOL_FILTER_FREE (0xf1UL)
- #define HWRM_WOL_FILTER_QCFG (0xf2UL)
- #define HWRM_WOL_REASON_QCFG (0xf3UL)
- #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL)
- #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL)
- #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
- #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
- #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
- #define HWRM_CFA_VFR_ALLOC (0xfdUL)
- #define HWRM_CFA_VFR_FREE (0xfeUL)
- #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
- #define HWRM_CFA_VF_PAIR_FREE (0x101UL)
- #define HWRM_CFA_VF_PAIR_INFO (0x102UL)
- #define HWRM_CFA_FLOW_ALLOC (0x103UL)
- #define HWRM_CFA_FLOW_FREE (0x104UL)
- #define HWRM_CFA_FLOW_FLUSH (0x105UL)
- #define HWRM_CFA_FLOW_STATS (0x106UL)
- #define HWRM_CFA_FLOW_INFO (0x107UL)
- #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
- #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL)
- #define HWRM_CFA_PAIR_ALLOC (0x10dUL)
- #define HWRM_CFA_PAIR_FREE (0x10eUL)
- #define HWRM_CFA_PAIR_INFO (0x10fUL)
- #define HWRM_FW_IPC_MSG (0x110UL)
- #define HWRM_SELFTEST_QLIST (0x200UL)
- #define HWRM_SELFTEST_EXEC (0x201UL)
- #define HWRM_SELFTEST_IRQ (0x202UL)
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL)
- #define HWRM_DBG_READ_DIRECT (0xff10UL)
- #define HWRM_DBG_READ_INDIRECT (0xff11UL)
- #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
- #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
- #define HWRM_DBG_DUMP (0xff14UL)
- #define HWRM_DBG_ERASE_NVM (0xff15UL)
- #define HWRM_DBG_CFG (0xff16UL)
- #define HWRM_DBG_COREDUMP_LIST (0xff17UL)
- #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL)
- #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL)
- #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
- #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
- #define HWRM_NVM_FLUSH (0xfff0UL)
- #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
- #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
- #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
- #define HWRM_NVM_MODIFY (0xfff4UL)
- #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
- #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
- #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
- #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
- #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
- #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
- #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
- #define HWRM_NVM_RAW_DUMP (0xfffcUL)
- #define HWRM_NVM_READ (0xfffdUL)
- #define HWRM_NVM_WRITE (0xfffeUL)
- #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS (0x0UL)
- #define HWRM_ERR_CODE_FAIL (0x1UL)
- #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
- #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
- #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
- #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
- #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* VXLAN IPv4 encapsulation structure (16 bytes) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* VXLAN IPv6 encapsulation structure (32 bytes) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
-
-/* VXLAN encapsulation structure (72 bytes) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
-};
-
-/* Periodic Statistics Context DMA to host (160 bytes) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
-
-/* Structure data header (16 bytes) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- __le16 len;
- u8 version;
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- __le16 unused_0[3];
-};
-
-/* DCBX Application configuration structure (1057) (8 bytes) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- u8 priority;
- u8 valid;
- u8 unused_0[3];
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};
-#endif
+#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c9617675f934..d87faad901fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->vf = vf_id;
vf = &bp->pf.vf[vf_id];
- memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ if (is_valid_ether_addr(vf->mac_addr))
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ else
+ memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
@@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-/* only call by PF to reserve resources for VF */
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_vf_resource_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+ u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int i, rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+ vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+ else
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+ vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+ vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+ vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+ req.min_rsscos_ctx = cpu_to_le16(1);
+ req.max_rsscos_ctx = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+ req.min_cmpl_rings = cpu_to_le16(1);
+ req.min_tx_rings = cpu_to_le16(1);
+ req.min_rx_rings = cpu_to_le16(1);
+ req.min_l2_ctxs = cpu_to_le16(1);
+ req.min_vnics = cpu_to_le16(1);
+ req.min_stat_ctx = cpu_to_le16(1);
+ req.min_hw_ring_grps = cpu_to_le16(1);
+ } else {
+ vf_cp_rings /= num_vfs;
+ vf_tx_rings /= num_vfs;
+ vf_rx_rings /= num_vfs;
+ vf_vnics /= num_vfs;
+ vf_stat_ctx /= num_vfs;
+ vf_ring_grps /= num_vfs;
+
+ req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.min_l2_ctxs = cpu_to_le16(4);
+ req.min_vnics = cpu_to_le16(vf_vnics);
+ req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ }
+ req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.max_l2_ctxs = cpu_to_le16(4);
+ req.max_vnics = cpu_to_le16(vf_vnics);
+ req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -ENOMEM;
+ break;
+ }
+ pf->active_vfs = i + 1;
+ pf->vf[i].fw_fid = pf->first_vf_id + i;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (pf->active_vfs) {
+ u16 n = 1;
+
+ if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+ n = pf->active_vfs;
+
+ hw_resc->max_tx_rings -= vf_tx_rings * n;
+ hw_resc->max_rx_rings -= vf_rx_rings * n;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+ hw_resc->max_cp_rings -= vf_cp_rings * n;
+ hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+ hw_resc->max_vnics -= vf_vnics * n;
+
+ rc = pf->active_vfs;
+ }
+ return rc;
+}
+
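
The reservation path above either pins every VF to a floor of one resource each (the "minimal" strategy, letting firmware grow a VF later) or divides the PF's leftover pool evenly and sets min == max per VF. A standalone sketch of that even-split arithmetic; the sample numbers are illustrative, not taken from any real adapter:

#include <stdio.h>

int main(void)
{
	/* Assumed PF totals, for illustration only */
	unsigned int max_tx_rings = 64, pf_tx_rings = 8;
	unsigned int num_vfs = 7;
	unsigned int vf_tx_rings = (max_tx_rings - pf_tx_rings) / num_vfs;

	/* (64 - 8) / 7 = 8: each VF is offered min == max == 8 TX rings */
	printf("per-VF tx rings: %u\n", vf_tx_rings);
	return 0;
}
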
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
- u16 vf_ring_grps;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_ring_grps, max_stat_ctxs;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int total_vf_tx_rings = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ max_stat_ctxs = hw_resc->max_stat_ctxs;
+
/* Remaining rings are distributed equally amongst VFs for now */
- vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
- vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+ vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
num_vfs;
else
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
- vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
- vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
- vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
+ num_vfs;
+ vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+ vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+ vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
@@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc) {
- pf->max_tx_rings -= total_vf_tx_rings;
- pf->max_rx_rings -= vf_rx_rings * num_vfs;
- pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
- pf->max_cp_rings -= vf_cp_rings * num_vfs;
- pf->max_rsscos_ctxs -= num_vfs;
- pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
- pf->max_vnics -= vf_vnics * num_vfs;
+ if (rc)
+ rc = -ENOMEM;
+ if (pf->active_vfs) {
+ hw_resc->max_tx_rings -= total_vf_tx_rings;
+ hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+ hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
+ hw_resc->max_rsscos_ctxs -= num_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+ hw_resc->max_vnics -= vf_vnics * num_vfs;
+ rc = pf->active_vfs;
}
return rc;
}
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+ if (bp->flags & BNXT_FLAG_NEW_RM)
+ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+ else
+ return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
@@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
- avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
- avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
avail_cp = min_t(int, avail_cp, avail_stat);
while (vfs_supported) {
@@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rss_ctxs = vfs_supported;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
min_rx_rings)
rx_ok = 1;
} else {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
min_rx_rings)
rx_ok = 1;
}
- if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+ if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
avail_cp < min_rx_rings)
rx_ok = 0;
- if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+ if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
avail_cp >= min_tx_rings)
tx_ok = 1;
- if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
+ min_rss_ctxs)
rss_ok = 1;
if (tx_ok && rx_ok && rss_ok)
@@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out1;
/* Reserve resources for VFs */
- rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
- if (rc)
- goto err_out2;
+ rc = bnxt_func_cfg(bp, *num_vfs);
+ if (rc != *num_vfs) {
+ if (rc <= 0) {
+ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+ *num_vfs = 0;
+ goto err_out2;
+ }
+ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+ *num_vfs = rc;
+ }
/* Register buffers for VFs */
rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -766,17 +886,51 @@ exec_fwd_resp_exit:
return rc;
}
+static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
+ struct hwrm_func_vf_cfg_input *req =
+ (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
+
+ /* Only allow VF to set a valid MAC address if the PF assigned MAC
+ * address is zero
+ */
+ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+ if (is_valid_ether_addr(req->dflt_mac_addr) &&
+ !is_valid_ether_addr(vf->mac_addr)) {
+ ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+}
+
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
struct hwrm_cfa_l2_filter_alloc_input *req =
(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+ bool mac_ok = false;
- if (!is_valid_ether_addr(vf->mac_addr) ||
- ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ /* VF MAC address must first match PF MAC address, if it is valid.
+ * Otherwise, it must match the VF MAC address if firmware spec >=
+ * 1.2.2
+ */
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ mac_ok = true;
+ } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
+ mac_ok = true;
+ } else {
+ /* Neither a PF-assigned nor a VF-stored MAC is recorded:
+ * accept the address on any firmware version.
+ */
+ mac_ok = true;
+ }
+ if (mac_ok)
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
- else
- return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
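
The validation above establishes a precedence: a PF-administered MAC always wins; failing that, a MAC the VF registered through HWRM_FUNC_VF_CFG is honored; with neither recorded, the filter is forwarded as-is. A minimal userspace sketch of the same acceptance rule, assuming "valid" means non-zero unicast (what the kernel's is_valid_ether_addr() checks); all names here are local:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool valid_mac(const uint8_t m[6])
{
	static const uint8_t zero[6];

	/* non-zero and unicast, as is_valid_ether_addr() requires */
	return memcmp(m, zero, 6) != 0 && !(m[0] & 1);
}

static bool l2_addr_ok(const uint8_t *req, const uint8_t *pf_assigned,
		       const uint8_t *vf_stored)
{
	if (valid_mac(pf_assigned))		/* PF-assigned MAC wins */
		return memcmp(req, pf_assigned, 6) == 0;
	if (valid_mac(vf_stored))		/* else the VF's own MAC */
		return memcmp(req, vf_stored, 6) == 0;
	return true;				/* nothing recorded: accept */
}

int main(void)
{
	const uint8_t pf[6] = { 0 };
	const uint8_t vf[6] = { 0x02, 0, 0, 0, 0, 1 };
	const uint8_t req[6] = { 0x02, 0, 0, 0, 0, 1 };

	printf("%d\n", l2_addr_ok(req, pf, vf));	/* 1: matches VF MAC */
	return 0;
}
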
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
u32 req_type = le16_to_cpu(encap_req->req_type);
switch (req_type) {
+ case HWRM_FUNC_VF_CFG:
+ rc = bnxt_vf_store_mac(bp, vf);
+ break;
case HWRM_CFA_L2_FILTER_ALLOC:
rc = bnxt_vf_validate_set_mac(bp, vf);
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d8fee26cd45e..fbe6e208e17b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
}
/* Is dev a VF-rep? */
- if (dev != pf_bp->dev)
+ if (bnxt_dev_is_vf_rep(dev))
return bnxt_vf_rep_get_fid(dev);
bp = netdev_priv(dev);
@@ -54,12 +54,10 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
{
- int ifindex = tcf_mirred_ifindex(tc_act);
- struct net_device *dev;
+ struct net_device *dev = tcf_mirred_dev(tc_act);
- dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
if (!dev) {
- netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
+ netdev_info(bp->dev, "no dev in mirred action");
return -EINVAL;
}
@@ -148,9 +146,6 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
}
}
- if (rc)
- return rc;
-
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
/* dst_fid is PF's fid */
@@ -164,7 +159,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
}
}
- return rc;
+ return 0;
}
#define GET_KEY(flow_cmd, key_type) \
@@ -1417,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
void *flow_node;
int rc, i;
- rc = rhashtable_walk_start(iter);
- if (rc && rc != -EAGAIN) {
- i = 0;
- goto done;
- }
+ rhashtable_walk_start(iter);
rc = 0;
for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
@@ -1483,9 +1474,6 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
{
int rc = 0;
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 69186d188c43..26290403f38f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -124,7 +124,8 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
struct bnxt *bp = vf_rep->bp;
int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
- if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev))
+ if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
+ !tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -241,6 +242,11 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
+bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
+}
+
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
* happens under the rtnl_lock() this routine is safe
@@ -376,6 +382,26 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
ether_addr_copy(dev->dev_addr, dev->perm_addr);
}
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+ struct pci_dev *pdev = bp->pdev;
+ int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ u32 dw;
+
+ if (!pos) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* DSN (two dwords) is at an offset of 4 from the cap pos */
+ pos += 4;
+ pci_read_config_dword(pdev, pos, &dw);
+ put_unaligned_le32(dw, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 4, &dw);
+ put_unaligned_le32(dw, &dsn[4]);
+ return 0;
+}
+
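
bnxt_pcie_dsn_get() stores the 64-bit PCIe Device Serial Number as eight little-endian bytes, lower dword first, via put_unaligned_le32(). A self-contained illustration of that byte layout; store_le32() is a local stand-in, not a kernel helper:

#include <stdint.h>

static void store_le32(uint32_t v, uint8_t *p)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

int main(void)
{
	uint8_t dsn[8];

	store_le32(0x11223344, &dsn[0]);	/* lower dword */
	store_le32(0x55667788, &dsn[4]);	/* upper dword */
	/* dsn[] now holds 44 33 22 11 88 77 66 55 */
	return 0;
}
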
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -440,6 +466,11 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
}
}
+ /* Read the adapter's DSN to use as the eswitch switch_id */
+ rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+ if (rc)
+ goto err;
+
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index fb06bbe70e42..38b9a75ad724 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -28,6 +28,7 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
return bp->pf.vf[vf_rep->vf_idx].fw_fid;
}
+bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
@@ -54,5 +55,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
return 0;
}
+
+static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return false;
+}
#endif /* CONFIG_BNXT_SRIOV */
#endif /* BNXT_VFR_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 261e5847557a..1389ab5e05df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp.data = *data_ptr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
+ xdp.rxq = &rxr->xdp_rxq;
orig_data = xdp.data;
mapping = rx_buf->mapping - bp->rx_dma_offset;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 24b4f4ceceef..b1e35a9accf1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2527,9 +2527,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
/* Link UP/DOWN event */
- if (status & UMAC_IRQ_LINK_EVENT)
- phy_mac_interrupt(priv->dev->phydev,
- !!(status & UMAC_IRQ_LINK_UP));
+ if (status & UMAC_IRQ_LINK_EVENT) {
+ priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+ phy_mac_interrupt(priv->dev->phydev);
+ }
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8995cfefbfcf..a77ee2f8fb8d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3227,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
return 0;
}
-#define NVRAM_CMD_TIMEOUT 5000
+#define NVRAM_CMD_TIMEOUT 10000
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
@@ -14789,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp)
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
- u32 nvcfg1, nvmpinstrp;
+ u32 nvcfg1, nvmpinstrp, nv_status;
nvcfg1 = tr32(NVRAM_CFG1);
nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
@@ -14801,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
}
switch (nvmpinstrp) {
+ case FLASH_5762_MX25L_100:
+ case FLASH_5762_MX25L_200:
+ case FLASH_5762_MX25L_400:
+ case FLASH_5762_MX25L_800:
+ case FLASH_5762_MX25L_160_320:
+ tp->nvram_pagesize = 4096;
+ tp->nvram_jedecnum = JEDEC_MACRONIX;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+ tg3_flag_set(tp, FLASH);
+ nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
+ tp->nvram_size =
+ (1 << (nv_status >> AUTOSENSE_DEVID &
+ AUTOSENSE_DEVID_MASK)
+ << AUTOSENSE_SIZE_IN_MB);
+ return;
+
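
The nvram_size computation above leans on C operator precedence: >> binds tighter than &, so the device id is (nv_status >> 16) & 0x7, and 1 << devid << 17 evaluates left to right as 2^(devid + 17) bytes. A quick check of the resulting size table:

#include <stdio.h>

int main(void)
{
	unsigned int devid;

	/* devid 0 -> 128 KiB ... devid 7 -> 16 MiB */
	for (devid = 0; devid < 8; devid++)
		printf("devid %u -> %u bytes\n", devid, 1u << devid << 17);
	return 0;
}
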
case FLASH_5762_EEPROM_HD:
nvmpinstrp = FLASH_5720_EEPROM_HD;
break;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1f0271fa7c74..47f51cc0566d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1863,7 +1863,7 @@
#define NVRAM_STAT 0x00007004
#define NVRAM_WRDATA 0x00007008
#define NVRAM_ADDR 0x0000700c
-#define NVRAM_ADDR_MSK 0x00ffffff
+#define NVRAM_ADDR_MSK 0x07ffffff
#define NVRAM_RDDATA 0x00007010
#define NVRAM_CFG1 0x00007014
#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001
@@ -1945,6 +1945,11 @@
#define FLASH_5720_EEPROM_LD 0x00000003
#define FLASH_5762_EEPROM_HD 0x02000001
#define FLASH_5762_EEPROM_LD 0x02000003
+#define FLASH_5762_MX25L_100 0x00800000
+#define FLASH_5762_MX25L_200 0x00800002
+#define FLASH_5762_MX25L_400 0x00800001
+#define FLASH_5762_MX25L_800 0x00800003
+#define FLASH_5762_MX25L_160_320 0x03800002
#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2009,7 +2014,11 @@
/* 0x702c unused */
#define NVRAM_ADDR_LOCKOUT 0x00007030
-/* 0x7034 --> 0x7500 unused */
+#define NVRAM_AUTOSENSE_STATUS 0x00007038
+#define AUTOSENSE_DEVID 0x00000010
+#define AUTOSENSE_DEVID_MASK 0x00000007
+#define AUTOSENSE_SIZE_IN_MB 17
+/* 0x703c --> 0x7500 unused */
#define OTP_MODE 0x00007500
#define OTP_MODE_OTP_THRU_GRC 0x00000001
@@ -3378,6 +3387,7 @@ struct tg3 {
#define JEDEC_ST 0x20
#define JEDEC_SAIFUN 0x4f
#define JEDEC_SST 0xbf
+#define JEDEC_MACRONIX 0xc2
#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
#define ATMEL_AT24C02_PAGE_SIZE (8)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c93f3a2dc6c1..86659823b259 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
+#include <linux/interrupt.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -164,14 +165,38 @@
#define GEM_DCFG5 0x0290 /* Design Config 5 */
#define GEM_DCFG6 0x0294 /* Design Config 6 */
#define GEM_DCFG7 0x0298 /* Design Config 7 */
+#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
+/* Screener Type 2 match registers */
+#define GEM_SCRT2 0x540
+
+/* EtherType registers */
+#define GEM_ETHT 0x06E0
+
+/* Type 2 compare registers */
+#define GEM_T2CMPW0 0x0700
+#define GEM_T2CMPW1 0x0704
+#define T2CMP_OFST(t2idx) ((t2idx) * 2)
+
+/* Type 2 compare registers: each location requires three compare regs */
+#define GEM_IP4SRC_CMP(idx) ((idx) * 3)
+#define GEM_IP4DST_CMP(idx) ((idx) * 3 + 1)
+#define GEM_PORT_CMP(idx) ((idx) * 3 + 2)
+
+/* Which screening type 2 EtherType register will be used (0 - 7) */
+#define SCRT2_ETHT 0
+
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
+#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
@@ -455,6 +480,16 @@
#define GEM_DAW64_OFFSET 23
#define GEM_DAW64_SIZE 1
+/* Bitfields in DCFG8. */
+#define GEM_T1SCR_OFFSET 24
+#define GEM_T1SCR_SIZE 8
+#define GEM_T2SCR_OFFSET 16
+#define GEM_T2SCR_SIZE 8
+#define GEM_SCR2ETH_OFFSET 8
+#define GEM_SCR2ETH_SIZE 8
+#define GEM_SCR2CMP_OFFSET 0
+#define GEM_SCR2CMP_SIZE 8
+
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
#define GEM_SUBNSINCR_SIZE 16
@@ -483,6 +518,66 @@
#define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */
#define GEM_RXTSMODE_SIZE 2
+/* Bitfields in SCRT2 */
+#define GEM_QUEUE_OFFSET 0 /* Queue Number */
+#define GEM_QUEUE_SIZE 4
+#define GEM_VLANPR_OFFSET 4 /* VLAN Priority */
+#define GEM_VLANPR_SIZE 3
+#define GEM_VLANEN_OFFSET 8 /* VLAN Enable */
+#define GEM_VLANEN_SIZE 1
+#define GEM_ETHT2IDX_OFFSET 9 /* Index to screener type 2 EtherType register */
+#define GEM_ETHT2IDX_SIZE 3
+#define GEM_ETHTEN_OFFSET 12 /* EtherType Enable */
+#define GEM_ETHTEN_SIZE 1
+#define GEM_CMPA_OFFSET 13 /* Compare A - Index to screener type 2 Compare register */
+#define GEM_CMPA_SIZE 5
+#define GEM_CMPAEN_OFFSET 18 /* Compare A Enable */
+#define GEM_CMPAEN_SIZE 1
+#define GEM_CMPB_OFFSET 19 /* Compare B - Index to screener type 2 Compare register */
+#define GEM_CMPB_SIZE 5
+#define GEM_CMPBEN_OFFSET 24 /* Compare B Enable */
+#define GEM_CMPBEN_SIZE 1
+#define GEM_CMPC_OFFSET 25 /* Compare C - Index to screener type 2 Compare register */
+#define GEM_CMPC_SIZE 5
+#define GEM_CMPCEN_OFFSET 30 /* Compare C Enable */
+#define GEM_CMPCEN_SIZE 1
+
+/* Bitfields in ETHT */
+#define GEM_ETHTCMP_OFFSET 0 /* EtherType compare value */
+#define GEM_ETHTCMP_SIZE 16
+
+/* Bitfields in T2CMPW0 */
+#define GEM_T2CMP_OFFSET 16 /* 0xFFFF0000 compare value */
+#define GEM_T2CMP_SIZE 16
+#define GEM_T2MASK_OFFSET 0 /* 0x0000FFFF compare value or mask */
+#define GEM_T2MASK_SIZE 16
+
+/* Bitfields in T2CMPW1 */
+#define GEM_T2DISMSK_OFFSET 9 /* disable mask */
+#define GEM_T2DISMSK_SIZE 1
+#define GEM_T2CMPOFST_OFFSET 7 /* compare offset */
+#define GEM_T2CMPOFST_SIZE 2
+#define GEM_T2OFST_OFFSET 0 /* offset value */
+#define GEM_T2OFST_SIZE 7
+
+/* Offset for screener type 2 compare values (T2CMPOFST).
+ * Note the offset is applied after the specified point,
+ * e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
+ * of 12 bytes from this would be the source IP address in an IP header
+ */
+#define GEM_T2COMPOFST_SOF 0
+#define GEM_T2COMPOFST_ETYPE 1
+#define GEM_T2COMPOFST_IPHDR 2
+#define GEM_T2COMPOFST_TCPUDP 3
+
+/* offset from EtherType to IP address */
+#define ETYPE_SRCIP_OFFSET 12
+#define ETYPE_DSTIP_OFFSET 16
+
+/* offset from IP header to port */
+#define IPHDR_SRCPORT_OFFSET 0
+#define IPHDR_DSTPORT_OFFSET 2
+
/* Transmit DMA buffer descriptor Word 1 */
#define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */
#define GEM_DMA_TXVALID_SIZE 1
@@ -583,6 +678,8 @@
#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + ((idx) * 4))
+#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + ((idx) * 4), (value))
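
gem_readl_n()/gem_writel_n() add indexed access to register banks: the idx-th register of a bank sits at GEM_<reg> + idx * 4. The flow-filter code later in this patch uses exactly this, e.g. gem_writel_n(bp, SCRT2, fs->location, t2_scr). A toy reproduction of the addressing:

#include <stdio.h>

#define GEM_SCRT2 0x540
#define reg_addr(reg, idx) (GEM_##reg + (idx) * 4)	/* same token-paste trick */

int main(void)
{
	/* screener type 2 slot 2 lives at 0x540 + 8 = 0x548 */
	printf("0x%x\n", reg_addr(SCRT2, 2));
	return 0;
}
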
#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
@@ -920,13 +1017,42 @@ static const struct gem_statistic gem_statistics[] = {
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
+#define QUEUE_STAT_TITLE(title) { \
+ .stat_string = title, \
+}
+
+/* Per-queue statistics; every member must be unsigned long so the
+ * struct can be walked as an array starting at 'first'
+ */
+struct queue_stats {
+ union {
+ unsigned long first;
+ unsigned long rx_packets;
+ };
+ unsigned long rx_bytes;
+ unsigned long rx_dropped;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_dropped;
+};
+
+static const struct gem_statistic queue_statistics[] = {
+ QUEUE_STAT_TITLE("rx_packets"),
+ QUEUE_STAT_TITLE("rx_bytes"),
+ QUEUE_STAT_TITLE("rx_dropped"),
+ QUEUE_STAT_TITLE("tx_packets"),
+ QUEUE_STAT_TITLE("tx_bytes"),
+ QUEUE_STAT_TITLE("tx_dropped"),
+};
+
+#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics)
+
struct macb;
+struct macb_queue;
struct macb_or_gem_ops {
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
void (*mog_init_rings)(struct macb *bp);
- int (*mog_rx)(struct macb *bp, int budget);
+ int (*mog_rx)(struct macb_queue *queue, int budget);
};
/* MACB-PTP interface: adapt to platform needs. */
@@ -968,6 +1094,9 @@ struct macb_queue {
unsigned int IMR;
unsigned int TBQP;
unsigned int TBQPH;
+ unsigned int RBQS;
+ unsigned int RBQP;
+ unsigned int RBQPH;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
@@ -975,6 +1104,16 @@ struct macb_queue {
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t rx_buffers_dma;
+ unsigned int rx_tail;
+ unsigned int rx_prepared_head;
+ struct macb_dma_desc *rx_ring;
+ struct sk_buff **rx_skbuff;
+ void *rx_buffers;
+ struct napi_struct napi;
+ struct queue_stats stats;
+
#ifdef CONFIG_MACB_USE_HWSTAMP
struct work_struct tx_ts_task;
unsigned int tx_ts_head, tx_ts_tail;
@@ -982,6 +1121,16 @@ struct macb_queue {
#endif
};
+struct ethtool_rx_fs_item {
+ struct ethtool_rx_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_fs_list {
+ struct list_head list;
+ unsigned int count;
+};
+
struct macb {
void __iomem *regs;
bool native_io;
@@ -990,11 +1139,6 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
- unsigned int rx_tail;
- unsigned int rx_prepared_head;
- struct macb_dma_desc *rx_ring;
- struct sk_buff **rx_skbuff;
- void *rx_buffers;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1011,15 +1155,11 @@ struct macb {
struct clk *tx_clk;
struct clk *rx_clk;
struct net_device *dev;
- struct napi_struct napi;
union {
struct macb_stats macb;
struct gem_stats gem;
} hw_stats;
- dma_addr_t rx_ring_dma;
- dma_addr_t rx_buffers_dma;
-
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
@@ -1032,7 +1172,6 @@ struct macb {
unsigned int dma_burst_length;
phy_interface_t phy_interface;
- struct gpio_desc *reset_gpio;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
@@ -1040,7 +1179,7 @@ struct macb {
int skb_length; /* saved skb length for pci_unmap_single */
unsigned int max_tx_length;
- u64 ethtool_stats[GEM_STATS_LEN];
+ u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
unsigned int rx_frm_len_mask;
unsigned int jumbo_max_len;
@@ -1057,6 +1196,13 @@ struct macb {
struct ptp_clock_info ptp_clock_info;
struct tsu_incr tsu_incr;
struct hwtstamp_config tstamp_config;
+
+ /* RX queue filter rule set */
+ struct ethtool_rx_fs_list rx_fs_list;
+ spinlock_t rx_fs_lock;
+ unsigned int max_tuples;
+
+ struct tasklet_struct hresp_err_tasklet;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72a67f74b97b..e84afcf1ecb5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
return index & (bp->rx_ring_size - 1);
}
-static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
- index = macb_rx_ring_wrap(bp, index);
- index = macb_adj_dma_desc_idx(bp, index);
- return &bp->rx_ring[index];
+ index = macb_rx_ring_wrap(queue->bp, index);
+ index = macb_adj_dma_desc_idx(queue->bp, index);
+ return &queue->rx_ring[index];
}
-static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
- return bp->rx_buffers + bp->rx_buffer_size *
- macb_rx_ring_wrap(bp, index);
+ return queue->rx_buffers + queue->bp->rx_buffer_size *
+ macb_rx_ring_wrap(queue->bp, index);
}
/* I/O accessors */
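
macb_rx_desc() above still relies on macb_rx_ring_wrap(), which wraps with index & (rx_ring_size - 1); that mask is only equivalent to a modulo when the ring size is a power of two. A standalone illustration:

#include <assert.h>
#include <stdio.h>

static unsigned int ring_wrap(unsigned int index, unsigned int size)
{
	assert((size & (size - 1)) == 0);	/* size must be a power of two */
	return index & (size - 1);
}

int main(void)
{
	printf("%u\n", ring_wrap(130, 128));	/* 130 mod 128 = 2 */
	return 0;
}
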
@@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
/* Now we can safely release resources */
@@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue)
netif_wake_subqueue(bp->dev, queue_index);
}
-static void gem_rx_refill(struct macb *bp)
+static void gem_rx_refill(struct macb_queue *queue)
{
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
+ struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
- while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
- bp->rx_ring_size) > 0) {
- entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
+ while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
+ bp->rx_ring_size) > 0) {
+ entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
rmb();
- bp->rx_prepared_head++;
- desc = macb_rx_desc(bp, entry);
+ queue->rx_prepared_head++;
+ desc = macb_rx_desc(queue, entry);
- if (!bp->rx_skbuff[entry]) {
+ if (!queue->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
if (unlikely(!skb)) {
@@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp)
break;
}
- bp->rx_skbuff[entry] = skb;
+ queue->rx_skbuff[entry] = skb;
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp)
/* Make descriptor updates visible to hardware */
wmb();
- netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
- bp->rx_prepared_head, bp->rx_tail);
+ netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
+ queue, queue->rx_prepared_head, queue->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
+static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
unsigned int end)
{
unsigned int frag;
for (frag = begin; frag != end; frag++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
}
@@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
*/
}
-static int gem_rx(struct macb *bp, int budget)
+static int gem_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
unsigned int len;
unsigned int entry;
struct sk_buff *skb;
@@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget)
dma_addr_t addr;
bool rxused;
- entry = macb_rx_ring_wrap(bp, bp->rx_tail);
- desc = macb_rx_desc(bp, entry);
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget)
if (!rxused)
break;
- bp->rx_tail++;
+ queue->rx_tail++;
count++;
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
netdev_err(bp->dev,
"not whole frame pointed by descriptor\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
- skb = bp->rx_skbuff[entry];
+ skb = queue->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
/* now everything is ready for receiving packet */
- bp->rx_skbuff[entry] = NULL;
+ queue->rx_skbuff[entry] = NULL;
len = ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
bp->dev->stats.rx_packets++;
+ queue->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
+ queue->stats.rx_bytes += skb->len;
gem_ptp_do_rxstamp(bp, skb, desc);
@@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget)
netif_receive_skb(skb);
}
- gem_rx_refill(bp);
+ gem_rx_refill(queue);
return count;
}
-static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
unsigned int last_frag)
{
unsigned int len;
@@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
unsigned int offset;
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb *bp = queue->bp;
- desc = macb_rx_desc(bp, last_frag);
+ desc = macb_rx_desc(queue, last_frag);
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
@@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
if (!skb) {
bp->dev->stats.rx_dropped++;
for (frag = first_frag; ; frag++) {
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
@@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- macb_rx_buffer(bp, frag),
+ macb_rx_buffer(queue, frag),
frag_len);
offset += bp->rx_buffer_size;
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
@@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
return 0;
}
-static inline void macb_init_rx_ring(struct macb *bp)
+static inline void macb_init_rx_ring(struct macb_queue *queue)
{
+ struct macb *bp = queue->bp;
dma_addr_t addr;
struct macb_dma_desc *desc = NULL;
int i;
- addr = bp->rx_buffers_dma;
+ addr = queue->rx_buffers_dma;
for (i = 0; i < bp->rx_ring_size; i++) {
- desc = macb_rx_desc(bp, i);
+ desc = macb_rx_desc(queue, i);
macb_set_addr(bp, desc, addr);
desc->ctrl = 0;
addr += bp->rx_buffer_size;
}
desc->addr |= MACB_BIT(RX_WRAP);
- bp->rx_tail = 0;
+ queue->rx_tail = 0;
}
-static int macb_rx(struct macb *bp, int budget)
+static int macb_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
bool reset_rx_queue = false;
int received = 0;
unsigned int tail;
int first_frag = -1;
- for (tail = bp->rx_tail; budget > 0; tail++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+ for (tail = queue->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
u32 ctrl;
/* Make hw descriptor updates visible to CPU */
@@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget)
if (ctrl & MACB_BIT(RX_SOF)) {
if (first_frag != -1)
- discard_partial_frame(bp, first_frag, tail);
+ discard_partial_frame(queue, first_frag, tail);
first_frag = tail;
}
@@ -1171,7 +1184,7 @@ static int macb_rx(struct macb *bp, int budget)
continue;
}
- dropped = macb_rx_frame(bp, first_frag, tail);
+ dropped = macb_rx_frame(queue, first_frag, tail);
first_frag = -1;
if (unlikely(dropped < 0)) {
reset_rx_queue = true;
@@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget)
ctrl = macb_readl(bp, NCR);
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
- macb_init_rx_ring(bp);
- macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_init_rx_ring(queue);
+ queue_writel(queue, RBQP, queue->rx_ring_dma);
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
@@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget)
}
if (first_frag != -1)
- bp->rx_tail = first_frag;
+ queue->rx_tail = first_frag;
else
- bp->rx_tail = tail;
+ queue->rx_tail = tail;
return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
- struct macb *bp = container_of(napi, struct macb, napi);
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+ struct macb *bp = queue->bp;
int work_done;
u32 status;
@@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
- work_done = bp->macbgem_ops.mog_rx(bp, budget);
+ work_done = bp->macbgem_ops.mog_rx(queue, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
status = macb_readl(bp, RSR);
if (status) {
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
} else {
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IER, MACB_RX_INT_FLAGS);
}
}
@@ -1244,6 +1258,57 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void macb_hresp_error_task(unsigned long data)
+{
+ struct macb *bp = (struct macb *)data;
+ struct net_device *dev = bp->dev;
+ struct macb_queue *queue = bp->queues;
+ unsigned int q;
+ u32 ctrl;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+
+ bp->macbgem_ops.mog_init_rings(bp);
+
+ /* Initialize TX and RX buffers */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH,
+ upper_32_bits(queue->rx_ring_dma));
+#endif
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH,
+ upper_32_bits(queue->tx_ring_dma));
+#endif
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+ MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
+ macb_writel(bp, NCR, ctrl);
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+}
+
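
macb_hresp_error_task() runs in tasklet context so the full ring re-init can happen outside the hard IRQ handler that detects HRESP. The registration is not part of these hunks; presumably it happens once at probe time with something like:

	/* Assumed probe-time setup (not shown in this diff) */
	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);
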
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1282,9 +1347,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&bp->napi)) {
+ if (napi_schedule_prep(&queue->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&bp->napi);
+ __napi_schedule(&queue->napi);
}
}
@@ -1333,10 +1398,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- /* TODO: Reset the hardware, and maybe move the
- * netdev_err to a lower-priority context as well
- * (work queue?)
- */
+ tasklet_schedule(&bp->hresp_err_tasklet);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1708,38 +1770,44 @@ static void gem_free_rx_buffers(struct macb *bp)
{
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb_queue *queue;
dma_addr_t addr;
+ unsigned int q;
int i;
- if (!bp->rx_skbuff)
- return;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (!queue->rx_skbuff)
+ continue;
- for (i = 0; i < bp->rx_ring_size; i++) {
- skb = bp->rx_skbuff[i];
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ skb = queue->rx_skbuff[i];
- if (!skb)
- continue;
+ if (!skb)
+ continue;
- desc = macb_rx_desc(bp, i);
- addr = macb_get_addr(bp, desc);
+ desc = macb_rx_desc(queue, i);
+ addr = macb_get_addr(bp, desc);
- dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- skb = NULL;
- }
+ dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
- kfree(bp->rx_skbuff);
- bp->rx_skbuff = NULL;
+ kfree(queue->rx_skbuff);
+ queue->rx_skbuff = NULL;
+ }
}
static void macb_free_rx_buffers(struct macb *bp)
{
- if (bp->rx_buffers) {
+ struct macb_queue *queue = &bp->queues[0];
+
+ if (queue->rx_buffers) {
dma_free_coherent(&bp->pdev->dev,
bp->rx_ring_size * bp->rx_buffer_size,
- bp->rx_buffers, bp->rx_buffers_dma);
- bp->rx_buffers = NULL;
+ queue->rx_buffers, queue->rx_buffers_dma);
+ queue->rx_buffers = NULL;
}
}
@@ -1748,11 +1816,12 @@ static void macb_free_consistent(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
+ queue = &bp->queues[0];
bp->macbgem_ops.mog_free_rx_buffers(bp);
- if (bp->rx_ring) {
+ if (queue->rx_ring) {
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
- bp->rx_ring, bp->rx_ring_dma);
- bp->rx_ring = NULL;
+ queue->rx_ring, queue->rx_ring_dma);
+ queue->rx_ring = NULL;
}
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -1768,32 +1837,37 @@ static void macb_free_consistent(struct macb *bp)
static int gem_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int size;
- size = bp->rx_ring_size * sizeof(struct sk_buff *);
- bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
- if (!bp->rx_skbuff)
- return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- bp->rx_ring_size, bp->rx_skbuff);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ size = bp->rx_ring_size * sizeof(struct sk_buff *);
+ queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
+ if (!queue->rx_skbuff)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ bp->rx_ring_size, queue->rx_skbuff);
+ }
return 0;
}
static int macb_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue = &bp->queues[0];
int size;
size = bp->rx_ring_size * bp->rx_buffer_size;
- bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_buffers_dma, GFP_KERNEL);
- if (!bp->rx_buffers)
+ queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_buffers_dma, GFP_KERNEL);
+ if (!queue->rx_buffers)
return -ENOMEM;
netdev_dbg(bp->dev,
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
return 0;
}
@@ -1819,17 +1893,16 @@ static int macb_alloc_consistent(struct macb *bp)
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
- }
-
- size = RX_RING_BYTES(bp);
- bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_ring_dma, GFP_KERNEL);
- if (!bp->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+ size = RX_RING_BYTES(bp);
+ queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_ring_dma, GFP_KERNEL);
+ if (!queue->rx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
+ }
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -1856,12 +1929,13 @@ static void gem_init_rings(struct macb *bp)
desc->ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
- }
- bp->rx_tail = 0;
- bp->rx_prepared_head = 0;
+ queue->rx_tail = 0;
+ queue->rx_prepared_head = 0;
+
+ gem_rx_refill(queue);
+ }
- gem_rx_refill(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -1869,7 +1943,7 @@ static void macb_init_rings(struct macb *bp)
int i;
struct macb_dma_desc *desc = NULL;
- macb_init_rx_ring(bp);
+ macb_init_rx_ring(&bp->queues[0]);
for (i = 0; i < bp->tx_ring_size; i++) {
desc = macb_tx_desc(&bp->queues[0], i);
@@ -1978,11 +2052,20 @@ static u32 macb_dbw(struct macb *bp)
*/
static void macb_configure_dma(struct macb *bp)
{
+ struct macb_queue *queue;
+ u32 buffer_size;
+ unsigned int q;
u32 dmacfg;
+ buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
- dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (q)
+ queue_writel(queue, RBQS, buffer_size);
+ else
+ dmacfg |= GEM_BF(RXBS, buffer_size);
+ }
if (bp->dma_burst_length)
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
@@ -2051,12 +2134,12 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
@@ -2197,6 +2280,8 @@ static int macb_open(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ struct macb_queue *queue;
+ unsigned int q;
int err;
netdev_dbg(bp->dev, "open\n");
@@ -2218,11 +2303,12 @@ static int macb_open(struct net_device *dev)
return err;
}
- napi_enable(&bp->napi);
-
bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_enable(&queue->napi);
+
/* schedule a link state check */
phy_start(dev->phydev);
@@ -2237,10 +2323,14 @@ static int macb_open(struct net_device *dev)
static int macb_close(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned long flags;
+ unsigned int q;
netif_tx_stop_all_queues(dev);
- napi_disable(&bp->napi);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_disable(&queue->napi);
if (dev->phydev)
phy_stop(dev->phydev);
@@ -2270,7 +2360,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
static void gem_update_stats(struct macb *bp)
{
- unsigned int i;
+ struct macb_queue *queue;
+ unsigned int i, q, idx;
+ unsigned long *stat;
+
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
@@ -2287,6 +2380,11 @@ static void gem_update_stats(struct macb *bp)
*(++p) += val;
}
}
+
+ idx = GEM_STATS_LEN;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+ bp->ethtool_stats[idx++] = *stat;
}
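
The per-queue flattening above walks struct queue_stats via &queue->stats.first, treating the struct as an array of unsigned long; the anonymous union makes 'first' alias the first counter. A minimal standalone version of the same trick, with illustrative field names:

#include <stdio.h>

struct counters {
	union {
		unsigned long first;	/* aliases the first counter */
		unsigned long rx_packets;
	};
	unsigned long rx_bytes;
	unsigned long tx_packets;
};

int main(void)
{
	struct counters c = { .rx_bytes = 42, .tx_packets = 7 };
	unsigned long *p = &c.first;
	unsigned int i;

	/* Only valid because every member is an unsigned long */
	for (i = 0; i < sizeof(c) / sizeof(*p); i++)
		printf("counter %u = %lu\n", i, p[i]);
	return 0;
}
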
static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -2334,14 +2432,17 @@ static void gem_get_ethtool_stats(struct net_device *dev,
bp = netdev_priv(dev);
gem_update_stats(bp);
- memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+ memcpy(data, &bp->ethtool_stats, sizeof(u64)
+ * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}
static int gem_get_sset_count(struct net_device *dev, int sset)
{
+ struct macb *bp = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return GEM_STATS_LEN;
+ return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2349,13 +2450,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
+ char stat_string[ETH_GSTRING_LEN];
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned int i;
+ unsigned int q;
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
memcpy(p, gem_statistics[i].stat_string,
ETH_GSTRING_LEN);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+ snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
+ q, queue_statistics[i].stat_string);
+ memcpy(p, stat_string, ETH_GSTRING_LEN);
+ }
+ }
break;
}
}
@@ -2603,6 +2716,307 @@ static int macb_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
}
+static void gem_enable_flow_filters(struct macb *bp, bool enable)
+{
+ struct ethtool_rx_fs_item *item;
+ u32 t2_scr;
+ int num_t2_scr;
+
+ num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ struct ethtool_rx_flow_spec *fs = &item->fs;
+ struct ethtool_tcpip4_spec *tp4sp_m;
+
+ if (fs->location >= num_t2_scr)
+ continue;
+
+ t2_scr = gem_readl_n(bp, SCRT2, fs->location);
+
+ /* enable/disable screener regs for the flow entry */
+ t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
+
+ /* only enable fields with no masking */
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
+
+ if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
+
+ if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
+ t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
+
+ gem_writel_n(bp, SCRT2, fs->location, t2_scr);
+ }
+}
+
+static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
+ uint16_t index = fs->location;
+ u32 w0, w1, t2_scr;
+ bool cmp_a = false;
+ bool cmp_b = false;
+ bool cmp_c = false;
+
+ tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ /* use this field only when fully specified (mask is all ones) */
+ if (tp4sp_m->ip4src == 0xFFFFFFFF) {
+ /* 1st compare reg - IP source address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4src;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
+ cmp_a = true;
+ }
+
+ /* use this field only when fully specified (mask is all ones) */
+ if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
+ /* 2nd compare reg - IP destination address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4dst;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
+ cmp_b = true;
+ }
+
+ /* program the port compare only when at least one port is fully specified */
+ if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
+ /* 3rd compare reg - source port, destination port */
+ w0 = 0;
+ w1 = 0;
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
+ if (tp4sp_m->psrc == tp4sp_m->pdst) {
+ w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else {
+ /* only one port definition */
+ w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
+ w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
+ if (tp4sp_m->psrc == 0xFFFF) { /* src port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else { /* dst port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
+ }
+ }
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
+ cmp_c = true;
+ }
+
+ t2_scr = 0;
+ t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
+ t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
+ if (cmp_a)
+ t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
+ if (cmp_b)
+ t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
+ if (cmp_c)
+ t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
+ gem_writel_n(bp, SCRT2, index, t2_scr);
+}
+
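
For context on where these compare registers get their values: userspace hands the driver an ethtool_rx_flow_spec through the ETHTOOL_SRXCLSRLINS path added below. A hedged sketch of a spec that would exercise the CMPB (dst IP) and CMPC (dst port) paths; the addresses and queue numbers are examples only:

#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <string.h>

static void example_spec(struct ethtool_rx_flow_spec *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->flow_type = TCP_V4_FLOW;
	fs->h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.0.10");
	fs->m_u.tcp_ip4_spec.ip4dst = 0xFFFFFFFF;	/* fully specified */
	fs->h_u.tcp_ip4_spec.pdst = htons(80);
	fs->m_u.tcp_ip4_spec.pdst = 0xFFFF;		/* fully specified */
	fs->ring_cookie = 1;	/* steer to RX queue 1 */
	fs->location = 0;	/* screener type 2 slot 0 */
}

In practice the same rule is installed with standard ethtool syntax, e.g. "ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.10 dst-port 80 action 1 loc 0", and removed with "ethtool -N eth0 delete 0".
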
+static int gem_add_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct ethtool_rx_fs_item *item, *newfs;
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool added = false;
+
+ newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
+ if (!newfs)
+ return -ENOMEM;
+ memcpy(&newfs->fs, fs, sizeof(newfs->fs));
+
+ netdev_dbg(netdev,
+ "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ /* find correct place to add in list */
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location > newfs->fs.location) {
+ list_add_tail(&newfs->list, &item->list);
+ added = true;
+ break;
+ } else if (item->fs.location == fs->location) {
+ netdev_err(netdev, "Rule not added: location %d not free!\n",
+ fs->location);
+ ret = -EBUSY;
+ goto err;
+ }
+ }
+ if (!added)
+ list_add_tail(&newfs->list, &bp->rx_fs_list.list);
+
+ gem_prog_cmp_regs(bp, fs);
+ bp->rx_fs_list.count++;
+ /* enable filtering if NTUPLE on */
+ if (netdev->features & NETIF_F_NTUPLE)
+ gem_enable_flow_filters(bp, 1);
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(newfs);
+ return ret;
+}
+
+static int gem_del_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ struct ethtool_rx_flow_spec *fs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ /* disable screener regs for the flow entry */
+ fs = &(item->fs);
+ netdev_dbg(netdev,
+ "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc),
+ htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ gem_writel_n(bp, SCRT2, fs->location, 0);
+
+ list_del(&item->list);
+ bp->rx_fs_list.count--;
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(item);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return -EINVAL;
+}
+
+static int gem_get_flow_entry(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int gem_get_all_flow_entries(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ uint32_t cnt = 0;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = item->fs.location;
+ cnt++;
+ }
+ cmd->data = bp->max_tuples;
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->num_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->rx_fs_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gem_get_flow_entry(netdev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if ((cmd->fs.location >= bp->max_tuples)
+ || (cmd->fs.ring_cookie >= bp->num_queues)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gem_add_flow_filter(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gem_del_flow_filter(netdev, cmd);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops macb_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -2628,6 +3042,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
+ .get_rxnfc = gem_get_rxnfc,
+ .set_rxnfc = gem_set_rxnfc,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2685,6 +3101,12 @@ static int macb_set_features(struct net_device *netdev,
gem_writel(bp, NCFGR, netcfg);
}
+ /* RX Flow Filters */
+ if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
+ bool turn_on = features & NETIF_F_NTUPLE;
+
+ gem_enable_flow_filters(bp, turn_on);
+ }
return 0;
}
@@ -2850,7 +3272,7 @@ static int macb_init(struct platform_device *pdev)
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
int err;
- u32 val;
+ u32 val, reg;
bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
@@ -2865,15 +3287,20 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
+ netif_napi_add(dev, &queue->napi, macb_poll, 64);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
queue->IDR = GEM_IDR(hw_q - 1);
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
+ queue->RBQP = GEM_RBQP(hw_q - 1);
+ queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = GEM_TBQPH(hw_q - 1);
+ queue->RBQPH = GEM_RBQPH(hw_q - 1);
+ }
#endif
} else {
/* queue0 uses legacy registers */
@@ -2882,9 +3309,12 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = MACB_IDR;
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
+ queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = MACB_TBQPH;
+ queue->RBQPH = MACB_RBQPH;
+ }
#endif
}
@@ -2908,7 +3338,6 @@ static int macb_init(struct platform_device *pdev)
}
dev->netdev_ops = &macb_netdev_ops;
- netif_napi_add(dev, &bp->napi, macb_poll, 64);
/* setup appropriated routines according to adapter type */
if (macb_is_gem(bp)) {
@@ -2941,6 +3370,30 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
+	/* Check RX Flow Filters support.
+	 * The maximum number of Rx flows is set by the availability of
+	 * screener and compare registers: each 4-tuple definition requires
+	 * 1 T2 screener register + 3 compare registers.
+	 */
+ reg = gem_readl(bp, DCFG8);
+ bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+ GEM_BFEXT(T2SCR, reg));
+ if (bp->max_tuples > 0) {
+ /* also needs one ethtype match to check IPv4 */
+ if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+ /* program this reg now */
+ reg = 0;
+ reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
+ gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
+			/* Filtering is supported in hardware, but leave it disabled in the kernel for now */
+ dev->hw_features |= NETIF_F_NTUPLE;
+ /* init Rx flow definitions */
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
+ bp->rx_fs_list.count = 0;
+ spin_lock_init(&bp->rx_fs_lock);
+ } else
+ bp->max_tuples = 0;
+ }
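
For illustration, how `max_tuples` falls out of the DCFG8 fields read above: if the hardware reported, say, 32 type-2 compare registers and 16 type-2 screeners (hypothetical values), the budget of 3 compare registers per 4-tuple caps the filter count at min(32 / 3, 16) = 10. A sketch of the arithmetic:

```c
#include <stdio.h>

/* Each 4-tuple filter consumes one type-2 screener and three
 * compare registers, so the smaller budget wins. */
static unsigned int max_tuples(unsigned int scr2cmp, unsigned int t2scr)
{
	unsigned int by_cmp = scr2cmp / 3;

	return by_cmp < t2scr ? by_cmp : t2scr;
}

int main(void)
{
	printf("%u\n", max_tuples(32, 16)); /* prints 10 */
	return 0;
}
```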
+
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
@@ -2977,34 +3430,35 @@ static int macb_init(struct platform_device *pdev)
static int at91ether_start(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
dma_addr_t addr;
u32 ctl;
int i;
- lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp)),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring)
+ &q->rx_ring_dma, GFP_KERNEL);
+ if (!q->rx_ring)
return -ENOMEM;
- lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
AT91ETHER_MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
- if (!lp->rx_buffers) {
+ &q->rx_buffers_dma, GFP_KERNEL);
+ if (!q->rx_buffers) {
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
return -ENOMEM;
}
- addr = lp->rx_buffers_dma;
+ addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
- desc = macb_rx_desc(lp, i);
+ desc = macb_rx_desc(q, i);
macb_set_addr(lp, desc, addr);
desc->ctrl = 0;
addr += AT91ETHER_MAX_RBUFF_SZ;
@@ -3014,10 +3468,10 @@ static int at91ether_start(struct net_device *dev)
desc->addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
- lp->rx_tail = 0;
+ q->rx_tail = 0;
/* Program address of descriptor list in Rx Buffer Queue register */
- macb_writel(lp, RBQP, lp->rx_ring_dma);
+ macb_writel(lp, RBQP, q->rx_ring_dma);
/* Enable Receive and Transmit */
ctl = macb_readl(lp, NCR);
@@ -3064,6 +3518,7 @@ static int at91ether_open(struct net_device *dev)
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
u32 ctl;
/* Disable Receiver and Transmitter */
@@ -3084,13 +3539,13 @@ static int at91ether_close(struct net_device *dev)
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- lp->rx_buffers, lp->rx_buffers_dma);
- lp->rx_buffers = NULL;
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
return 0;
}
@@ -3134,14 +3589,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void at91ether_rx(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
while (desc->addr & MACB_BIT(RX_USED)) {
- p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+ p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
@@ -3163,12 +3619,12 @@ static void at91ether_rx(struct net_device *dev)
desc->addr &= ~MACB_BIT(RX_USED);
/* wrap after last buffer */
- if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
- lp->rx_tail = 0;
+ if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+ q->rx_tail = 0;
else
- lp->rx_tail++;
+ q->rx_tail++;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
}
}
@@ -3394,7 +3850,6 @@ static int macb_probe(struct platform_device *pdev)
= macb_config->clk_init;
int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
- struct device_node *phy_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
@@ -3500,18 +3955,6 @@ static int macb_probe(struct platform_device *pdev)
else
macb_get_hwaddr(bp);
- /* Power up the PHY if there is a GPIO reset */
- phy_node = of_get_next_available_child(np, NULL);
- if (phy_node) {
- int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-
- if (gpio_is_valid(gpio)) {
- bp->reset_gpio = gpio_to_desc(gpio);
- gpiod_direction_output(bp->reset_gpio, 1);
- }
- }
- of_node_put(phy_node);
-
err = of_get_phy_mode(np);
if (err < 0) {
pdata = dev_get_platdata(&pdev->dev);
@@ -3542,6 +3985,9 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
+ tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
+ (unsigned long)bp);
+
phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -3558,10 +4004,6 @@ err_out_unregister_mdio:
of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
err_out_free_netdev:
free_netdev(dev);
@@ -3592,10 +4034,6 @@ static int macb_remove(struct platform_device *pdev)
dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
unregister_netdev(dev);
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 63be75eb34d2..043e3c11c42b 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -27,6 +27,7 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
+ imply CAVIUM_PTP
depends on 64BIT
---help---
This driver supports Thunder's NIC virtual function
@@ -50,6 +51,18 @@ config THUNDER_NIC_RGX
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
+config CAVIUM_PTP
+ tristate "Cavium PTP coprocessor as PTP clock"
+ depends on 64BIT
+ imply PTP_1588_CLOCK
+ default y
+ ---help---
+	  This driver adds support for the Precision Time Protocol Clocks and
+	  Timestamping coprocessor (PTP) found on Cavium processors.
+	  PTP provides a timestamping mechanism that is suitable for use with
+	  the IEEE 1588 Precision Time Protocol or for other purposes.
+	  Timestamps can be used in the BGX, TNS, GTI, and NIC blocks.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 872da9f7c31a..946bba84e81d 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the Cavium ethernet device drivers.
#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 000000000000..dd8561b8060b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 000000000000..c87c9c684a33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME "Cavium PTP Driver"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ u64 ret = CLOCK_BASE_RATE * 16;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+struct cavium_ptp *cavium_ptp_get(void)
+{
+ struct cavium_ptp *ptp;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+ pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp_info: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ * 16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 comp;
+ u64 adj;
+ bool neg_adj = false;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+	/* The hardware adds the clock compensation value to the PTP clock
+	 * on every coprocessor clock cycle. The convention is that it
+	 * represents the number of nanoseconds between cycles: the
+	 * compensation value is a 64-bit fixed-point number whose upper
+	 * 32 bits are whole nanoseconds and whose lower 32 bits are
+	 * fractions of a nanosecond.
+	 * scaled_ppm is the ratio, in parts per million with a 16-bit
+	 * binary fractional field, by which the compensation value should
+	 * be corrected.
+	 * The new compensation value is computed with 64-bit fixed-point
+	 * arithmetic from the formula
+	 * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+	 * where tbase is the basic compensation value calculated initially
+	 * in cavium_ptp_probe() -> tbase = 1/Hz. An endian-independent
+	 * structure definition is then used to write the data to the PTP
+	 * register.
+	 */
+ comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ adj = comp * scaled_ppm;
+ adj >>= 16;
+ adj = div_u64(adj, 1000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
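
A worked example of the fixed-point arithmetic above, as standalone C (the 800 MHz coprocessor clock is an assumed value): tbase = 2^32 * 1e9 / 800e6 encodes 1.25 ns per cycle in 32.32 format, and a +1 ppm request (scaled_ppm = 65536) shifts it by tbase / 1e6.

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 800000000ULL;  /* assumed coprocessor clock, Hz */
	long scaled_ppm = 65536;       /* +1 ppm in 16.16 ppm format */
	uint64_t comp, adj;

	/* tbase: nanoseconds per cycle in 32.32 fixed point */
	comp = (1000000000ULL << 32) / rate;
	/* correction: tbase * scaled_ppm / (1M * 2^16) */
	adj = (comp * (uint64_t)scaled_ppm >> 16) / 1000000ULL;

	printf("tbase = %" PRIu64 ", +1ppm adj = %" PRIu64 "\n", comp, adj);
	/* tbase = 5368709120 (1.25 ns), adj = 5368 */
	return 0;
}
```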
+
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp_info: PTP clock info
+ * @delta: how much to adjust by, in nanoseconds
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_adjtime(&clock->time_counter, delta);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ /* Sync, for network driver to get latest value */
+ smp_mb();
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ nsec = timecounter_read(&clock->time_counter);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp_info: PTP clock info
+ * @rq: request
+ * @on: whether to enable (1) or disable (0) the feature
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct cavium_ptp *clock =
+ container_of(cc, struct cavium_ptp, cycle_counter);
+
+ return readq(clock->reg_base + PTP_CLOCK_HI);
+}
+
+static int cavium_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct cavium_ptp *clock;
+ struct cyclecounter *cc;
+ u64 clock_cfg;
+ u64 clock_comp;
+ int err;
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ clock->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ spin_lock_init(&clock->spin_lock);
+
+ cc = &clock->cycle_counter;
+ cc->read = cavium_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&clock->time_counter, &clock->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ clock->clock_rate = ptp_cavium_clock_get();
+
+ clock->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "ThunderX PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cavium_ptp_adjfine,
+ .adjtime = cavium_ptp_adjtime,
+ .gettime64 = cavium_ptp_gettime,
+ .settime64 = cavium_ptp_settime,
+ .enable = cavium_ptp_enable,
+ };
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+ clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+ if (!clock->ptp_clock) {
+ err = -ENODEV;
+ goto error_stop;
+ }
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto error_stop;
+ }
+
+ pci_set_drvdata(pdev, clock);
+ return 0;
+
+error_stop:
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+ pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+ devm_kfree(dev, clock);
+
+error:
+ /* For `cavium_ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+	 * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
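
The ERR_PTR-in-drvdata trick above leans on the kernel convention that the last page of the address space never holds a valid pointer, so a small negative errno can be smuggled through a `void *`. A minimal userspace re-implementation of that encoding, for illustration only:

```c
#include <stdio.h>

#define MAX_ERRNO 4095

/* Mirror the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() encoding:
 * -1..-4095 map into the top page, which is never a valid pointer. */
static inline void *err_ptr(long error) { return (void *)error; }
static inline long ptr_err(const void *ptr) { return (long)ptr; }
static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = err_ptr(-19); /* -ENODEV */

	if (is_err(p))
		printf("stored error: %ld\n", ptr_err(p));
	return 0;
}
```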
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+ struct cavium_ptp *clock = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ ptp_clock_unregister(clock->ptp_clock);
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = cavium_ptp_id_table,
+ .probe = cavium_ptp_probe,
+ .remove = cavium_ptp_remove,
+};
+
+static int __init cavium_ptp_init_module(void)
+{
+ return pci_register_driver(&cavium_ptp_driver);
+}
+
+static void __exit cavium_ptp_cleanup_module(void)
+{
+ pci_unregister_driver(&cavium_ptp_driver);
+}
+
+module_init(cavium_ptp_init_module);
+module_exit(cavium_ptp_cleanup_module);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 000000000000..be2bafc7beeb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+ struct pci_dev *pdev;
+
+ /* Serialize access to cycle_counter, time_counter and hw_registers */
+ spinlock_t spin_lock;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ void __iomem *reg_base;
+
+ u32 clock_rate;
+
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+};
+
+#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ unsigned long flags;
+ u64 ret;
+
+ spin_lock_irqsave(&ptp->spin_lock, flags);
+ ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+ spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+ return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return -1;
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 2c615ab09e64..f38abf626412 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
size = octdevsize + priv_size + configsize +
(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
- buf = vmalloc(size);
+ buf = vzalloc(size);
if (!buf)
return NULL;
- memset(buf, 0, size);
-
oct = (struct octeon_device *)buf;
oct->priv = (void *)(buf + octdevsize);
oct->chip = (void *)(buf + octdevsize + priv_size);
@@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
size = sizeof(struct octeon_ioq_vector) * num_ioqs;
- oct->ioq_vector = vmalloc(size);
+ oct->ioq_vector = vzalloc(size);
if (!oct->ioq_vector)
return 1;
- memset(oct->ioq_vector, 0, size);
for (i = 0; i < num_ioqs; i++) {
ioq_vector = &oct->ioq_vector[i];
ioq_vector->oct_dev = oct;
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4a02e618e318..4cacce5d2b16 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -263,6 +263,8 @@ struct nicvf_drv_stats {
struct u64_stats_sync syncp;
};
+struct cavium_ptp;
+
struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
@@ -312,6 +314,33 @@ struct nicvf {
struct tasklet_struct qs_err_task;
struct work_struct reset_task;
+ /* PTP timestamp */
+ struct cavium_ptp *ptp_clock;
+ /* Inbound timestamping is on */
+ bool hw_rx_tstamp;
+	/* When a packet that requires timestamping is sent, the hardware
+	 * inserts two entries into the completion queue. The first is the
+	 * regular CQE_TYPE_SEND entry that signals that the packet was sent.
+	 * The second is CQE_TYPE_SEND_PTP, which contains the actual
+	 * timestamp for that packet.
+	 * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
+	 * entry and is consumed and zeroed in the handler for the
+	 * CQE_TYPE_SEND_PTP entry.
+	 * So `ptp_skb` holds the pointer to the packet between the calls to
+	 * the CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
+	 */
+ struct sk_buff *ptp_skb;
+	/* `tx_ptp_skbs` is set while the hardware is sending a packet that
+	 * requires timestamping. Cavium hardware cannot process more than one
+	 * such packet at once, so this is set each time the driver submits
+	 * a packet that requires timestamping to the send queue and cleared
+	 * each time it receives the completion-queue entry saying that such
+	 * a packet was sent.
+	 * So `tx_ptp_skbs` prevents the driver from submitting more than one
+	 * packet that requires timestamping to the hardware at a time.
+	 */
+ atomic_t tx_ptp_skbs;
+
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
u32 msg_enable;
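
A sketch of the single-slot gating that `tx_ptp_skbs` implements, using C11 atomics in place of the kernel's `atomic_add_unless()` (function names are illustrative):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int tx_ptp_busy;

/* Claim the single hardware timestamp slot; returns false if a
 * timestamped packet is already in flight. */
static bool claim_tstamp_slot(void)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&tx_ptp_busy, &expected, 1);
}

/* Called from the CQE_TYPE_SEND_PTP completion handler. */
static void release_tstamp_slot(void)
{
	atomic_store(&tx_ptp_busy, 0);
}

int main(void)
{
	printf("%d %d\n", claim_tstamp_slot(), claim_tstamp_slot()); /* 1 0 */
	release_tstamp_slot();
	printf("%d\n", claim_tstamp_slot()); /* 1 */
	return 0;
}
```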
@@ -371,6 +400,7 @@ struct nicvf {
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
+#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -521,6 +551,11 @@ struct pfc {
u8 fc_tx;
};
+struct set_ptp {
+ u8 msg;
+ bool enable;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -540,6 +575,7 @@ union nic_mbx {
struct set_loopback lbk;
struct reset_stat_cfg reset_stat;
struct pfc pfc;
+ struct set_ptp ptp;
};
#define NIC_NODE_ID_MASK 0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 8f1dd55b3e08..7ff66a8194e2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -18,7 +18,7 @@
#include "q_struct.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-nic"
+#define DRV_NAME "nicpf"
#define DRV_VERSION "1.0"
struct hw_info {
@@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* TNS and TNS bypass modes are present only on 88xx */
+	/* TNS and TNS bypass modes are present only on 88xx.
+	 * The offset of this CSR also changed on 81xx and 83xx.
+	 */
if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
/* Disable TNS mode on both interfaces */
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX0_BLOCK | (1ULL << 16));
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX1_BLOCK | (1ULL << 16));
+ } else {
+ /* Configure timestamp generation timeout to 10us */
+ for (i = 0; i < nic->hw->bgx_cnt; i++)
+ nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+ (1ULL << 16));
}
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
@@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
}
}
+/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+ struct pkind_cfg *pkind;
+ u8 lmac, bgx_idx;
+ u64 pkind_val, pkind_idx;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+ pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+ pkind = (struct pkind_cfg *)&pkind_val;
+
+ if (ptp->enable && !pkind->hdr_sl) {
+		/* Skip length (in 2-byte units) to exclude the 8-byte
+		 * timestamp while parsing the packet; if not configured,
+		 * this results in L2 errors.
+		 */
+ pkind->hdr_sl = 4;
+ /* Adjust max packet length allowed */
+ pkind->maxlen += (pkind->hdr_sl * 2);
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+ } else if (!ptp->enable && pkind->hdr_sl) {
+ pkind->maxlen -= (pkind->hdr_sl * 2);
+ pkind->hdr_sl = 0;
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+ }
+
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
goto unlock;
+ case NIC_MBOX_MSG_PTP_CFG:
+ nic_config_timestamp(nic, vf, &mbx.ptp);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 80d46337cf29..a16c48a1ebb2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -99,6 +99,7 @@
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_INTFX_SEND_CFG (0x4000)
#define NIC_PF_MCAM_0_191_ENA (0x100000)
#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
#define NIC_PF_MCAM_CTRL (0x120000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index b9ece9cbf98b..5603f5ab1fee 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -9,14 +9,16 @@
/* ETHTOOL Support for VNIC_VF Device*/
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
struct nicvf_stat {
@@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev,
return 0;
}
+static int nicvf_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
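
The `phc_index` reported here is what lets userspace locate the clock: `ethtool -T` prints it, and the matching character device is /dev/ptp<index>. A sketch of reading that clock directly via the dynamic posix-clock interface (the device path is an assumption):

```c
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Dynamic posix clock: the clockid encodes the open fd (CLOCKFD == 3). */
#define FD_TO_CLOCKID(fd) ((clockid_t)((((unsigned int)~(fd)) << 3) | 3))

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY); /* assumed index */
	struct timespec ts;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
		perror("clock_gettime");
	else
		printf("phc: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}
```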
+
static const struct ethtool_ops nicvf_ethtool_ops = {
.get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
@@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
.set_pauseparam = nicvf_set_pauseparam,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = nicvf_get_ts_info,
.get_link_ksettings = nicvf_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a063c36c4c58..b68cde9f17d2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -20,13 +20,15 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
/* Supported devices */
@@ -65,6 +67,11 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+struct nicvf_xdp_tx {
+ u64 dma_addr;
+ u8 qidx;
+};
+
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -500,14 +507,29 @@ static int nicvf_init_resources(struct nicvf *nic)
return 0;
}
+static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
+{
+	/* Check if it's a recycled page; if not, unmap the DMA mapping.
+	 * A recycled page holds an extra reference.
+	 */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
- struct sk_buff **skb)
+ struct rcv_queue *rq, struct sk_buff **skb)
{
struct xdp_buff xdp;
struct page *page;
+ struct nicvf_xdp_tx *xdp_tx = NULL;
u32 action;
- u16 len, offset = 0;
+ u16 len, err, offset = 0;
u64 dma_addr, cpu_addr;
void *orig_data;
@@ -521,10 +543,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
+ xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
xdp.data = (void *)cpu_addr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
rcu_read_lock();
@@ -540,18 +563,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
switch (action) {
case XDP_PASS:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
/* Build SKB and pass on packet to network stack */
*skb = build_skb(xdp.data,
@@ -564,6 +576,20 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
case XDP_TX:
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
+ case XDP_REDIRECT:
+ /* Save DMA address for use while transmitting */
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ xdp_tx->dma_addr = dma_addr;
+ xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+ err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
+ if (!err)
+ return true;
+
+ /* Free the page on error */
+ nicvf_unmap_page(nic, page, dma_addr);
+ put_page(page);
+ break;
default:
bpf_warn_invalid_xdp_action(action);
/* fall through */
@@ -571,24 +597,51 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
trace_xdp_exception(nic->netdev, prog, action);
/* fall through */
case XDP_DROP:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
put_page(page);
return true;
}
return false;
}
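
For XDP_REDIRECT the code above stashes the DMA address and queue index at the very start of the receive page (inside the extra RCV_BUF_HEADROOM), so the transmit path can later recover them from any pointer into the page. A standalone sketch of that trick; the driver goes through `virt_to_page()`/`page_address()`, which for direct-mapped memory amounts to the pointer masking shown here (`struct xdp_tx_meta` and the constants are illustrative):

```c
#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define RCV_BUF_HEADROOM 128

struct xdp_tx_meta {
	uint64_t dma_addr;
	uint8_t qidx;
};

int main(void)
{
	void *page;

	if (posix_memalign(&page, PAGE_SIZE, PAGE_SIZE))
		return 1;

	/* packet data lives past the headroom */
	uint8_t *data = (uint8_t *)page + RCV_BUF_HEADROOM;

	/* stash per-packet metadata at the start of the page */
	struct xdp_tx_meta *meta = page;
	meta->dma_addr = 0x1000;
	meta->qidx = 3;

	/* later: recover the metadata from any pointer into the page */
	struct xdp_tx_meta *again =
		(void *)((uintptr_t)data & ~(PAGE_SIZE - 1));
	printf("qidx=%u dma=0x%llx\n", again->qidx,
	       (unsigned long long)again->dma_addr);

	free(page);
	return 0;
}
```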
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+ struct cqe_send_t *cqe_tx)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct skb_shared_hwtstamps ts;
+ u64 ns;
+
+ nic = nic->pnicvf;
+
+ /* Sync for 'ptp_skb' */
+ smp_rmb();
+
+ /* New timestamp request can be queued now */
+ atomic_set(&nic->tx_ptp_skbs, 0);
+
+ /* Check for timestamp requested skb */
+ if (!nic->ptp_skb)
+ return;
+
+	/* Check whether timestamping timed out (the timeout is set to 10us) */
+ if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+ cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+ goto no_tstamp;
+
+ /* Get the timestamp */
+ memset(&ts, 0, sizeof(ts));
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+ /* Free the original skb */
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ /* Sync 'ptp_skb' */
+ smp_wmb();
+}
+
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cqe_send_t *cqe_tx,
int budget, int *subdesc_cnt,
@@ -645,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
prefetch(skb);
(*tx_pkts)++;
*tx_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ /* If timestamp is requested for this skb, don't free it */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ !nic->pnicvf->ptp_skb)
+ nic->pnicvf->ptp_skb = skb;
+ else
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -684,9 +742,25 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
skb_set_hash(skb, hash, hash_type);
}
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+ u64 ns;
+
+ if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+ return;
+
+	/* The first 8 bytes are the timestamp */
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+ be64_to_cpu(*(__be64 *)skb->data));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ __skb_pull(skb, 8);
+}
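
The Rx path above relies on BGX prepending the raw 64-bit counter value, big endian, as the first 8 bytes of the frame. A sketch of pulling that field out in portable C (standalone, not the driver's skb handling):

```c
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Extract the raw big-endian timestamp prepended to the frame. */
static uint64_t raw_rx_tstamp(const uint8_t *frame)
{
	uint64_t be;

	memcpy(&be, frame, sizeof(be));
	return be64toh(be);
}

int main(void)
{
	uint8_t frame[8] = { 0, 0, 0, 0, 0, 0, 0x12, 0x34 };

	printf("0x%llx\n", (unsigned long long)raw_rx_tstamp(frame));
	return 0;
}
```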
+
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+ struct cqe_rx_t *cqe_rx,
+ struct snd_queue *sq, struct rcv_queue *rq)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -712,7 +786,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
/* For XDP, ignore pkts spanning multiple pages */
if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
/* Packet consumed by XDP */
- if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+ if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
return;
} else {
skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -734,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
+ nicvf_set_rxtstamp(nic, skb);
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -769,6 +844,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
struct snd_queue *sq = &qs->sq[cq_idx];
+ struct rcv_queue *rq = &qs->rq[cq_idx];
unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
spin_lock_bh(&cq->lock);
@@ -799,7 +875,7 @@ loop:
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -808,10 +884,12 @@ loop:
&tx_pkts, &tx_bytes);
tx_done++;
break;
+ case CQE_TYPE_SEND_PTP:
+ nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+ break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
case CQE_TYPE_RX_TCP:
- case CQE_TYPE_SEND_PTP:
/* Ignore for now */
break;
}
@@ -1307,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev)
nicvf_free_cq_poll(nic);
+ /* Free any pending SKB saved to receive timestamp */
+ if (nic->ptp_skb) {
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ }
+
/* Clear multiqset info */
nic->pnicvf = nic;
return 0;
}
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+
+ mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+ mbx.ptp.enable = enable;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
union nic_mbx mbx = {};
@@ -1382,6 +1476,12 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
+ /* Configure PTP timestamp */
+ if (nic->ptp_clock)
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+ atomic_set(&nic->tx_ptp_skbs, 0);
+ nic->ptp_skb = NULL;
+
/* Configure receive side scaling and MTU */
if (!nic->sqs_mode) {
nicvf_rss_init(nic);
@@ -1764,6 +1864,117 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
+static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
+ struct nicvf_xdp_tx *xdp_tx;
+ struct snd_queue *sq;
+ struct page *page;
+ int err, qidx;
+
+ if (!netif_running(netdev) || !nic->xdp_prog)
+ return -EINVAL;
+
+ page = virt_to_page(xdp->data);
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ qidx = xdp_tx->qidx;
+
+ if (xdp_tx->qidx >= nic->xdp_tx_queues)
+ return -EINVAL;
+
+ /* Get secondary Qset's info */
+ if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
+ qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
+ snic = (struct nicvf *)nic->snicvf[qidx - 1];
+ if (!snic)
+ return -EINVAL;
+ qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
+ }
+
+ sq = &snic->qs->sq[qidx];
+ err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
+ xdp_tx->dma_addr,
+ xdp->data_end - xdp->data);
+ if (err)
+ return -ENOMEM;
+
+ nicvf_xdp_sq_doorbell(snic, sq, qidx);
+ return 0;
+}
+
+static void nicvf_xdp_flush(struct net_device *dev)
+{
+ return;
+}
+
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ nic->hw_rx_tstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ nic->hw_rx_tstamp = true;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (netif_running(netdev))
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return nicvf_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
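
From userspace this handler is reached through the standard SIOCSHWTSTAMP ioctl. A minimal sketch of enabling hardware timestamps on an interface (the interface name "eth0" is an assumption):

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}
```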
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1775,6 +1986,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
+ .ndo_xdp_xmit = nicvf_xdp_xmit,
+ .ndo_xdp_flush = nicvf_xdp_flush,
+ .ndo_do_ioctl = nicvf_ioctl,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1784,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct nicvf *nic;
int err, qcount;
u16 sdevid;
+ struct cavium_ptp *ptp_clock;
+
+ ptp_clock = cavium_ptp_get();
+ if (IS_ERR(ptp_clock)) {
+ if (PTR_ERR(ptp_clock) == -ENODEV)
+			/* In a virtualized environment, proceed without PTP */
+ ptp_clock = NULL;
+ else
+ return PTR_ERR(ptp_clock);
+ }
err = pci_enable_device(pdev);
if (err) {
@@ -1833,6 +2057,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->pdev = pdev;
nic->pnicvf = nic;
nic->max_queues = qcount;
+	/* If the number of CPUs is too low, there won't be any queues left
+	 * for XDP_TX, hence double it.
+	 */
+ if (!nic->t88)
+ nic->max_queues *= 2;
+ nic->ptp_clock = ptp_clock;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1946,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
free_percpu(nic->drv_stats);
+ cavium_ptp_put(nic->ptp_clock);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a3d12dbde95b..3eae9ff9b53a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
/* Reserve space for header modifications by BPF program */
if (rbdr->is_xdp)
- buf_len += XDP_PACKET_HEADROOM;
+ buf_len += XDP_HEADROOM;
/* Check if it's recycled */
if (pgcache)
@@ -224,8 +224,9 @@ ret:
nic->rb_page = NULL;
return -ENOMEM;
}
+
if (pgcache)
- pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+ pgcache->dma_addr = *rbuf + XDP_HEADROOM;
nic->rb_page_offset += buf_len;
}
@@ -759,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
if (!rq->enable) {
nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
return;
}
@@ -771,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
/* all writes of RBDR data to be loaded into L2 Cache as well*/
rq->caching = 1;
+	/* The driver has no proper error path for a failed XDP RX-queue info registration */
+ WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+
/* Send a mailbox msg to PF to config RQ */
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
mbx.rq.qs_num = qs->vnic_id;
@@ -977,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
qs_cfg->be = 1;
#endif
qs_cfg->vnic = qs->vnic_id;
+ /* Enable Tx timestamping capability */
+ if (nic->ptp_clock)
+ qs_cfg->send_tstmp_ena = 1;
}
nicvf_send_msg_to_pf(nic, &mbx);
}
@@ -1236,7 +1244,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
int qentry;
if (subdesc_cnt > sq->xdp_free_cnt)
- return 0;
+ return -1;
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
@@ -1247,7 +1255,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
sq->xdp_desc_cnt += subdesc_cnt;
- return 1;
+ return 0;
}
/* Calculate no of SQ subdescriptors needed to transmit all
@@ -1384,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
+
+ /* Check if timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_tx_timestamp(skb);
+ return;
+ }
+
+ /* Tx timestamping not supported along with TSO, so ignore request */
+ if (skb_shinfo(skb)->gso_size)
+ return;
+
+ /* HW supports only a single outstanding packet to timestamp */
+ if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+ return;
+
+ /* Mark the SKB for later reference */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	/* Finally enable timestamp generation.
+	 * Since 'post_cqe' is also set, two CQEs will be posted
+	 * for this packet, i.e. CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+	 */
+ hdr->tstmp = 1;
}
/* SQ GATHER subdescriptor
@@ -1625,7 +1656,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
if (page_ref_count(page) != 1)
return;
- len += XDP_PACKET_HEADROOM;
+ len += XDP_HEADROOM;
/* Receive buffers in XDP mode are mapped from page start */
dma_addr &= PAGE_MASK;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 67d1a3230773..7d1e4e2aaad0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,6 +11,8 @@
#include <linux/netdevice.h>
#include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
@@ -92,6 +94,9 @@
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */
+#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
+
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
@@ -251,6 +256,7 @@ struct rcv_queue {
u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
u8 caching;
struct rx_tx_queue_stats stats;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct cmp_queue {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7796b8..91d34ea40e2c 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -21,7 +21,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-BGX"
+#define DRV_NAME "thunder_bgx"
#define DRV_VERSION "1.0"
struct lmac {
@@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 csr_offset, cfg;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ if (lmac->lmac_type == BGX_MODE_SGMII ||
+ lmac->lmac_type == BGX_MODE_QSGMII ||
+ lmac->lmac_type == BGX_MODE_RGMII)
+ csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+ else
+ csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+ cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+ if (enable)
+ cfg |= BGX_PKT_RX_PTP_EN;
+ else
+ cfg &= ~BGX_PKT_RX_PTP_EN;
+ bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 23acdc5ab896..5a7567d31138 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -122,6 +122,8 @@
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_FRM_CTL 0x20020
+#define BGX_PKT_RX_PTP_EN BIT_ULL(12)
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
@@ -172,6 +174,7 @@
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL 0x38028
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
@@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 578c7f8f11bf..2d5e8dab1f70 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -20,7 +20,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-xcv"
+#define DRV_NAME "thunder_xcv"
#define DRV_VERSION "1.0"
/* Register offsets */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 5713e83be08c..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -69,6 +69,7 @@ config CHELSIO_T4
depends on PCI && (IPV6 || IPV6=n)
select FW_LOADER
select MDIO
+ select ZLIB_DEFLATE
---help---
This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
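
cxgb4 now selects ZLIB_DEFLATE so cudbg can compress collected debug blobs before writing them out. A minimal userspace sketch of a deflate round-trip using zlib's one-shot API, just to show the flow (build with -lz):

```c
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	const char msg[] = "cudbg test payload, highly compressible aaaa";
	unsigned char out[128];
	uLongf out_len = sizeof(out);

	/* one-shot deflate of the input buffer at the fastest level */
	if (compress2(out, &out_len, (const Bytef *)msg, sizeof(msg),
		      Z_BEST_SPEED) != Z_OK)
		return 1;
	printf("%zu -> %lu bytes\n", sizeof(msg), (unsigned long)out_len);
	return 0;
}
```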
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 6a015362c340..185fe8df7628 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3304,6 +3304,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->ethtool_ops = &cxgb_ethtool_ops;
netdev->min_mtu = 81;
netdev->max_mtu = ETH_MAX_MTU;
+ netdev->dev_port = pi->port_id;
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 8c9c6b0d2e5d..53b6a02c778e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
- cudbg_common.o cudbg_lib.o
+ cudbg_common.o cudbg_lib.o cudbg_zlib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
index f78ba1743b5a..8edc49827af0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
@@ -19,7 +19,8 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff)
{
u32 offset;
@@ -28,17 +29,30 @@ int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
if (offset + size > pdbg_buff->size)
return CUDBG_STATUS_NO_MEM;
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) {
+ if (size > pdbg_init->compress_buff_size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_init->compress_buff;
+ pin_buff->offset = 0;
+ pin_buff->size = size;
+ return 0;
+ }
+
pin_buff->data = (char *)pdbg_buff->data + offset;
pin_buff->offset = offset;
pin_buff->size = size;
- pdbg_buff->size -= size;
return 0;
}
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff)
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff)
{
- pdbg_buff->size += pin_buff->size;
+ /* Clear compression buffer for re-use */
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE)
+ memset(pdbg_init->compress_buff, 0,
+ pdbg_init->compress_buff_size);
+
pin_buff->data = NULL;
pin_buff->offset = 0;
pin_buff->size = 0;
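
A minimal sketch (not part of the patch) of a collector under the reworked interface; example_collect and the 128-byte request are hypothetical. With compression enabled, cudbg_get_buff() now hands back the shared scratch buffer rather than a slice of the final dump buffer, which is why both helpers take pdbg_init:

static int example_collect(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff)
{
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	/* With compression on, temp_buff.data aliases
	 * pdbg_init->compress_buff; otherwise it points into
	 * dbg_buff at the current offset.
	 */
	rc = cudbg_get_buff(pdbg_init, dbg_buff, 128, &temp_buff);
	if (rc)
		return rc;

	/* ... fill temp_buff.data with 128 bytes of entity data ... */

	/* Release also goes through pdbg_init, so the scratch buffer
	 * can be cleared for the next entity.
	 */
	cudbg_put_buff(pdbg_init, &temp_buff);
	return 0;
}
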
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 605689957496..b57acb8dc35b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -18,17 +18,15 @@
#ifndef __CUDBG_ENTITY_H__
#define __CUDBG_ENTITY_H__
-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
+#define HMA_FLAG 5
#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
-struct card_mem {
- u16 size_edc0;
- u16 size_edc1;
- u16 mem_flag;
-};
-
struct cudbg_mbox_log {
struct mbox_cmd entry;
u32 hi[MBOX_LEN / 8];
@@ -87,6 +85,48 @@ struct cudbg_tp_la {
u8 data[0];
};
+static const char * const cudbg_region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+};
+
+/* Memory region info relative to current memory (i.e. wrt 0). */
+struct cudbg_region_info {
+ bool exist; /* Does region exist in current memory? */
+ u32 start; /* Start wrt 0 */
+ u32 end; /* End wrt 0 */
+};
+
+struct cudbg_mem_desc {
+ u32 base;
+ u32 limit;
+ u32 idx;
+};
+
+struct cudbg_meminfo {
+ struct cudbg_mem_desc avail[4];
+ struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
+ u32 avail_c;
+ u32 mem_c;
+ u32 up_ram_lo;
+ u32 up_ram_hi;
+ u32 up_extmem2_lo;
+ u32 up_extmem2_hi;
+ u32 rx_pages_data[3];
+ u32 tx_pages_data[4];
+ u32 p_structs;
+ u32 reserved[12];
+ u32 port_used[4];
+ u32 port_alloc[4];
+ u32 loopback_used[NCHAN];
+ u32 loopback_alloc[NCHAN];
+};
+
struct cudbg_cim_pif_la {
int size;
u8 data[0];
@@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
u32 reserved[16];
};
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
#define CUDBG_MAX_FL_QIDS 1024
struct cudbg_ch_cntxt {
@@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};
+#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
+
+static const u32 t5_pcie_config_array[][2] = {
+ {0x0, 0x34},
+ {0x3c, 0x40},
+ {0x50, 0x64},
+ {0x70, 0x80},
+ {0x94, 0xa0},
+ {0xb0, 0xb8},
+ {0xd0, 0xd4},
+ {0x100, 0x128},
+ {0x140, 0x148},
+ {0x150, 0x164},
+ {0x170, 0x178},
+ {0x180, 0x194},
+ {0x1a0, 0x1b8},
+ {0x1c0, 0x208},
+};
+
static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
@@ -345,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
-
-};
-
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+ {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
};
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
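
Note the widened tables change the row stride: the up_cim arrays now carry a fifth per-row instance selector, while tables such as t6_hma_ireg_array keep IREG_NUM_ELEM entries, so row counts must be computed per table. A standalone demo of the walk, assuming IREG_NUM_ELEM is 4 as elsewhere in this header:

#include <stdio.h>

#define IREG_NUM_ELEM 4	/* assumption: matches cudbg_entity.h */

static const unsigned int rows[][IREG_NUM_ELEM + 1] = {
	{0x7b50, 0x7b54, 0x2000, 0x20, 0},	/* plain contiguous range */
	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4},	/* expanded per instance */
};

int main(void)
{
	size_t n = sizeof(rows) / ((IREG_NUM_ELEM + 1) * sizeof(rows[0][0]));
	size_t i;

	for (i = 0; i < n; i++)
		printf("addr=%#x data=%#x off=%#x range=%#x inst=%#x\n",
		       rows[i][0], rows[i][1], rows[i][2], rows[i][3],
		       rows[i][4]);
	return 0;
}
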
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index e10ff1ee62c5..8568a51f6414 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -21,6 +21,7 @@
/* Error codes */
#define CUDBG_STATUS_NO_MEM -19
#define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
#define CUDBG_SYSTEM_ERROR -29
#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
@@ -47,6 +48,8 @@ enum cudbg_dbg_entity_type {
CUDBG_CIM_OBQ_NCSI = 17,
CUDBG_EDC0 = 18,
CUDBG_EDC1 = 19,
+ CUDBG_MC0 = 20,
+ CUDBG_MC1 = 21,
CUDBG_RSS = 22,
CUDBG_RSS_VF_CONF = 25,
CUDBG_PATH_MTU = 27,
@@ -56,6 +59,7 @@ enum cudbg_dbg_entity_type {
CUDBG_SGE_INDIRECT = 37,
CUDBG_ULPRX_LA = 41,
CUDBG_TP_LA = 43,
+ CUDBG_MEMINFO = 44,
CUDBG_CIM_PIF_LA = 45,
CUDBG_CLK = 46,
CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -63,6 +67,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PCIE_INDIRECT = 50,
CUDBG_PM_INDIRECT = 51,
CUDBG_TID_INFO = 54,
+ CUDBG_PCIE_CONFIG = 55,
CUDBG_DUMP_CONTEXT = 56,
CUDBG_MPS_TCAM = 57,
CUDBG_VPD_DATA = 58,
@@ -74,6 +79,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PBT_TABLE = 65,
CUDBG_MBOX_LOG = 66,
CUDBG_HMA_INDIRECT = 67,
+ CUDBG_HMA = 68,
CUDBG_MAX_ENTITY = 70,
};
@@ -81,6 +87,10 @@ struct cudbg_init {
struct adapter *adap; /* Pointer to adapter structure */
void *outbuf; /* Output buffer */
u32 outbuf_size; /* Output buffer size */
+ u8 compress_type; /* Type of compression to use */
+ void *compress_buff; /* Compression buffer */
+ u32 compress_buff_size; /* Compression buffer size */
+ void *workspace; /* Workspace for zlib */
};
static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
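
A hedged sketch of how a caller might populate the new fields; CUDBG_COMPRESSION_ZLIB, the buffer names, and the zlib_deflate_workspacesize() sizing are assumptions based on the ZLIB_DEFLATE dependency this series adds, not code from the patch:

struct cudbg_init init = { 0 };

init.adap = padap;
init.outbuf = outbuf;
init.outbuf_size = outbuf_size;
/* Compression is opt-in; CUDBG_COMPRESSION_NONE keeps the old path. */
init.compress_type = CUDBG_COMPRESSION_ZLIB;
init.compress_buff = compress_buff;
init.compress_buff_size = compress_buff_size;
/* zlib deflate needs a preallocated workspace (assumed sizing). */
init.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
						    MAX_MEM_LEVEL));
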
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d699bf88d18f..557fd8bfd54e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -15,18 +15,65 @@
*
*/
+#include <linux/sort.h>
+
#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-#include "cudbg_lib.h"
#include "cudbg_entity.h"
+#include "cudbg_lib.h"
+#include "cudbg_zlib.h"
-static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *dbg_buff)
+static int cudbg_do_compression(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
{
- cudbg_update_buff(pin_buff, dbg_buff);
- cudbg_put_buff(pin_buff, dbg_buff);
+ struct cudbg_buffer temp_in_buff = { 0 };
+ int bytes_left, bytes_read, bytes;
+ u32 offset = dbg_buff->offset;
+ int rc;
+
+ temp_in_buff.offset = pin_buff->offset;
+ temp_in_buff.data = pin_buff->data;
+ temp_in_buff.size = pin_buff->size;
+
+ bytes_left = pin_buff->size;
+ bytes_read = 0;
+ while (bytes_left > 0) {
+ /* Do compression in smaller chunks */
+ bytes = min_t(unsigned long, bytes_left,
+ (unsigned long)CUDBG_CHUNK_SIZE);
+ temp_in_buff.data = (char *)pin_buff->data + bytes_read;
+ temp_in_buff.size = bytes;
+ rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
+ if (rc)
+ return rc;
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ }
+
+ pin_buff->size = dbg_buff->offset - offset;
+ return 0;
+}
+
+static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
+{
+ int rc = 0;
+
+ if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
+ cudbg_update_buff(pin_buff, dbg_buff);
+ } else {
+ rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
+ if (rc)
+ goto out;
+ }
+
+out:
+ cudbg_put_buff(pdbg_init, pin_buff);
+ return rc;
}
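
A standalone userspace model (all values invented) of the accounting in cudbg_do_compression(): input is consumed in CUDBG_CHUNK_SIZE pieces, each compressed independently into the destination, and pin_buff->size is rewritten to the growth of the destination offset so the caller learns the compressed size:

#include <stdio.h>

#define CHUNK 4096	/* stand-in for CUDBG_CHUNK_SIZE */

int main(void)
{
	unsigned long in_size = 3 * CHUNK + 100;
	unsigned long out_off = 128, start = out_off;	/* dbg_buff->offset */
	unsigned long left = in_size, bytes;

	while (left > 0) {
		bytes = left < CHUNK ? left : CHUNK;
		out_off += bytes / 3;	/* stand-in for one deflate call */
		left -= bytes;
	}
	/* pin_buff->size = dbg_buff->offset - offset */
	printf("in=%lu compressed=%lu\n", in_size, out_off - start);
	return 0;
}
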
static int is_fw_attached(struct cudbg_init *pdbg_init)
@@ -84,6 +131,277 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
return 0;
}
+static int cudbg_mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct cudbg_mem_desc *)a)->base -
+ ((const struct cudbg_mem_desc *)b)->base;
+}
+
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff)
+{
+ struct cudbg_mem_desc *md;
+ u32 lo, hi, used, alloc;
+ int n, i;
+
+ memset(meminfo_buff->avail, 0,
+ ARRAY_SIZE(meminfo_buff->avail) *
+ sizeof(struct cudbg_mem_desc));
+ memset(meminfo_buff->mem, 0,
+ (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
+ md = meminfo_buff->mem;
+
+ for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
+ meminfo_buff->mem[i].limit = 0;
+ meminfo_buff->mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 0;
+ i++;
+ }
+
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(padap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 3;
+ i++;
+ }
+
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 2;
+ i++;
+ }
+
+ if (lo & HMA_MUX_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 5;
+ i++;
+ }
+ }
+
+ if (!i) /* no memory available */
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ meminfo_buff->avail_c = i;
+ sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+ (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(padap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ }
+ md++;
+
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region);
+ if (!is_t4(padap->params.chip)) {
+ u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
+ u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
+ u32 size = 0;
+
+ if (is_t5(padap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(padap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = padap->vres.ocq.start;
+ if (padap->vres.ocq.size)
+ md->limit = md->base + padap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ md++;
+
+ /* add any address-space holes; there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (meminfo_buff->avail[n].limit <
+ meminfo_buff->avail[n + 1].base)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ if (meminfo_buff->avail[n].limit)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ n = md - meminfo_buff->mem;
+ meminfo_buff->mem_c = n;
+
+ sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+
+ lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_ram_lo = lo;
+ meminfo_buff->up_ram_hi = hi;
+
+ lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_extmem2_lo = lo;
+ meminfo_buff->up_extmem2_hi = hi;
+
+ lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+ meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
+ meminfo_buff->rx_pages_data[1] =
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
+ meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
+
+ lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+ meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
+ meminfo_buff->tx_pages_data[1] =
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
+ meminfo_buff->tx_pages_data[2] =
+ hi >= (1 << 20) ? 'M' : 'K';
+ meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
+
+ meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->port_used[i] = used;
+ meminfo_buff->port_alloc[i] = alloc;
+ }
+
+ for (i = 0; i < padap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->loopback_used[i] = used;
+ meminfo_buff->loopback_alloc[i] = alloc;
+ }
+
+ return 0;
+}
+
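
A standalone demo of the region bookkeeping above: descriptors are sorted by base address, after which a hole shows up as a gap between one limit and the next base. The comparator here deliberately avoids the subtraction idiom so it cannot misorder very large u32 bases; the sample addresses are invented:

#include <stdio.h>
#include <stdlib.h>

struct mem_desc { unsigned int base, limit, idx; };

static int mem_desc_cmp(const void *a, const void *b)
{
	const struct mem_desc *x = a, *y = b;

	return (x->base > y->base) - (x->base < y->base);
}

int main(void)
{
	struct mem_desc avail[] = {
		{ 0x40000000, 0x7fffffff, 2 },	/* e.g. MC */
		{ 0x00000000, 0x0fffffff, 0 },	/* e.g. EDC0 */
	};
	size_t i, n = sizeof(avail) / sizeof(avail[0]);

	qsort(avail, n, sizeof(avail[0]), mem_desc_cmp);
	for (i = 0; i + 1 < n; i++)
		if (avail[i].limit < avail[i + 1].base)
			printf("hole after idx %u at %#x\n",
			       avail[i].idx, avail[i].limit);
	return 0;
}
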
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
@@ -98,12 +416,11 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
buf_size = T5_REGMAP_SIZE;
- rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
if (rc)
return rc;
t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
@@ -122,7 +439,7 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
}
dparams = &padap->params.devlog;
- rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
if (rc)
return rc;
@@ -137,12 +454,11 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
@@ -163,14 +479,14 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
}
size += sizeof(cfg);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -180,11 +496,10 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
NULL);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
@@ -196,7 +511,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
int size, rc;
size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -204,8 +519,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
(u32 *)temp_buff.data,
(u32 *)((char *)temp_buff.data +
5 * CIM_MALA_SIZE));
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
@@ -217,7 +531,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
struct cudbg_cim_qcfg *cim_qcfg_data;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
&temp_buff);
if (rc)
return rc;
@@ -228,7 +542,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -237,14 +551,13 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
cim_qcfg_data->obq_wr);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
cim_qcfg_data->thres);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
@@ -258,7 +571,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
/* collect CIM IBQ */
qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -272,11 +585,10 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
@@ -343,7 +655,7 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
/* collect CIM OBQ */
qsize = cudbg_cim_obq_size(padap, qid);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -357,11 +669,10 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
@@ -420,23 +731,211 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
+static int cudbg_meminfo_get_mem_index(struct adapter *padap,
+ struct cudbg_meminfo *mem_info,
+ u8 mem_type, u8 *idx)
+{
+ u8 i, flag;
+
+ switch (mem_type) {
+ case MEM_EDC0:
+ flag = EDC0_FLAG;
+ break;
+ case MEM_EDC1:
+ flag = EDC1_FLAG;
+ break;
+ case MEM_MC0:
+ /* Some T5 cards have both MC0 and MC1. */
+ flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
+ break;
+ case MEM_MC1:
+ flag = MC1_FLAG;
+ break;
+ case MEM_HMA:
+ flag = HMA_FLAG;
+ break;
+ default:
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+ }
+
+ for (i = 0; i < mem_info->avail_c; i++) {
+ if (mem_info->avail[i].idx == flag) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+}
+
+/* Fetch the @region_name's start and end from @meminfo. */
+static int cudbg_get_mem_region(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, const char *region_name,
+ struct cudbg_mem_desc *mem_desc)
+{
+ u8 mc, found = 0;
+ u32 i, idx = 0;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
+ if (!strcmp(cudbg_region[i], region_name)) {
+ found = 1;
+ idx = i;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ found = 0;
+ for (i = 0; i < meminfo->mem_c; i++) {
+ if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
+ continue; /* Skip holes */
+
+ if (!(meminfo->mem[i].limit))
+ meminfo->mem[i].limit =
+ i < meminfo->mem_c - 1 ?
+ meminfo->mem[i + 1].base - 1 : ~0;
+
+ if (meminfo->mem[i].idx == idx) {
+ /* Check if the region exists in @mem_type memory */
+ if (meminfo->mem[i].base < meminfo->avail[mc].base &&
+ meminfo->mem[i].limit < meminfo->avail[mc].base)
+ return -EINVAL;
+
+ if (meminfo->mem[i].base > meminfo->avail[mc].limit)
+ return -EINVAL;
+
+ memcpy(mem_desc, &meminfo->mem[i],
+ sizeof(struct cudbg_mem_desc));
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Fetch and update the start and end of the requested memory region w.r.t 0
+ * in the corresponding EDC/MC/HMA.
+ */
+static int cudbg_get_mem_relative(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, u32 *out_base, u32 *out_end)
+{
+ u8 mc_idx;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
+ if (rc)
+ return rc;
+
+ if (*out_base < meminfo->avail[mc_idx].base)
+ *out_base = 0;
+ else
+ *out_base -= meminfo->avail[mc_idx].base;
+
+ if (*out_end > meminfo->avail[mc_idx].limit)
+ *out_end = meminfo->avail[mc_idx].limit;
+ else
+ *out_end -= meminfo->avail[mc_idx].base;
+
+ return 0;
+}
+
+/* Get TX and RX Payload region */
+static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
+ const char *region_name,
+ struct cudbg_region_info *payload)
+{
+ struct cudbg_mem_desc mem_desc = { 0 };
+ struct cudbg_meminfo meminfo;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
+ &mem_desc);
+ if (rc) {
+ payload->exist = false;
+ return 0;
+ }
+
+ payload->exist = true;
+ payload->start = mem_desc.base;
+ payload->end = mem_desc.limit;
+
+ return cudbg_get_mem_relative(padap, &meminfo, mem_type,
+ &payload->start, &payload->end);
+}
+
+#define CUDBG_YIELD_ITERATION 256
+
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff, u8 mem_type,
unsigned long tot_len,
struct cudbg_error *cudbg_err)
{
+ static const char * const region_name[] = { "Tx payload:",
+ "Rx payload:" };
unsigned long bytes, bytes_left, bytes_read = 0;
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_region_info payload[2];
+ u32 yield_count = 0;
int rc = 0;
+ u8 i;
+
+ /* Get TX/RX Payload region ranges if they exist */
+ memset(payload, 0, sizeof(payload));
+ for (i = 0; i < ARRAY_SIZE(region_name); i++) {
+ rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
+ &payload[i]);
+ if (rc)
+ return rc;
+
+ if (payload[i].exist) {
+ /* Align start and end to avoid wrap around */
+ payload[i].start = roundup(payload[i].start,
+ CUDBG_CHUNK_SIZE);
+ payload[i].end = rounddown(payload[i].end,
+ CUDBG_CHUNK_SIZE);
+ }
+ }
bytes_left = tot_len;
while (bytes_left > 0) {
+ /* As the MC size is huge and is read through PIO access, this
+ * loop can hold the CPU for a long time. The OS may think that
+ * the process has hung and generate CPU stall traces. So yield
+ * the CPU regularly.
+ */
+ yield_count++;
+ if (!(yield_count % CUDBG_YIELD_ITERATION))
+ schedule();
+
bytes = min_t(unsigned long, bytes_left,
(unsigned long)CUDBG_CHUNK_SIZE);
- rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
if (rc)
return rc;
+
+ for (i = 0; i < ARRAY_SIZE(payload); i++)
+ if (payload[i].exist &&
+ bytes_read >= payload[i].start &&
+ bytes_read + bytes <= payload[i].end)
+ /* TX and RX Payload regions can't overlap */
+ goto skip_read;
+
spin_lock(&padap->win0_lock);
rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
bytes_read, bytes,
@@ -445,37 +944,23 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
+
+skip_read:
bytes_left -= bytes;
bytes_read += bytes;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+ dbg_buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
return rc;
}
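
A standalone model of the loop above: because payload windows are rounded to CUDBG_CHUNK_SIZE boundaries, every chunk falls either entirely inside a window (skipped) or entirely outside it (read). The chunk size and window placement below are invented for the demo:

#include <stdio.h>

#define CHUNK (1UL << 16)	/* stand-in for CUDBG_CHUNK_SIZE */

struct window { int exist; unsigned long start, end; };

int main(void)
{
	struct window payload[2] = {
		{ 1, 2 * CHUNK, 5 * CHUNK },	/* e.g. "Tx payload:" */
		{ 0, 0, 0 },			/* "Rx payload:" absent */
	};
	unsigned long total = 8 * CHUNK, read = 0, bytes;
	int i, skip;

	while (read < total) {
		bytes = total - read < CHUNK ? total - read : CHUNK;
		skip = 0;
		for (i = 0; i < 2; i++)
			if (payload[i].exist && read >= payload[i].start &&
			    read + bytes <= payload[i].end)
				skip = 1;	/* inside a payload window */
		printf("%#07lx..%#07lx %s\n", read, read + bytes,
		       skip ? "skipped" : "read");
		read += bytes;
	}
	return 0;
}
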
-static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
- struct card_mem *mem_info)
-{
- struct adapter *padap = pdbg_init->adap;
- u32 value;
-
- value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
- value = EDRAM0_SIZE_G(value);
- mem_info->size_edc0 = (u16)value;
-
- value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
- value = EDRAM1_SIZE_G(value);
- mem_info->size_edc1 = (u16)value;
-
- value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
- if (value & EDRAM0_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC0_FLAG);
- if (value & EDRAM1_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC1_FLAG);
-}
-
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err)
{
@@ -495,37 +980,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
- struct card_mem mem_info = {0};
- unsigned long flag, size;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_meminfo mem_info;
+ unsigned long size;
+ u8 mc_idx;
int rc;
+ memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(padap, &mem_info);
+ if (rc)
+ return rc;
+
cudbg_t4_fwcache(pdbg_init, cudbg_err);
- cudbg_collect_mem_info(pdbg_init, &mem_info);
- switch (mem_type) {
- case MEM_EDC0:
- flag = (1 << EDC0_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
- break;
- case MEM_EDC1:
- flag = (1 << EDC1_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
- break;
- default:
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
+ rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+ if (rc)
+ return rc;
- if (mem_info.mem_flag & flag) {
- rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
- size, cudbg_err);
- if (rc)
- goto err;
- } else {
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
-err:
- return rc;
+ size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+ return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+ cudbg_err);
}
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
@@ -544,26 +1017,51 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
MEM_EDC1);
}
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC0);
+}
+
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC1);
+}
+
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_HMA);
+}
+
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
- int rc;
+ int rc, nentries;
- rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+ nentries = t4_chip_rss_size(padap);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
rc = t4_read_rss(padap, (u16 *)temp_buff.data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
@@ -576,7 +1074,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
int vf, rc, vf_count;
vf_count = padap->params.arch.vfcount;
- rc = cudbg_get_buff(dbg_buff,
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
vf_count * sizeof(struct cudbg_rss_vf_conf),
&temp_buff);
if (rc)
@@ -586,8 +1084,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
for (vf = 0; vf < vf_count; vf++)
t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh, true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
@@ -598,13 +1095,13 @@ int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
int rc;
- rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
@@ -616,7 +1113,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
struct cudbg_pm_stats *pm_stats_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
&temp_buff);
if (rc)
return rc;
@@ -624,8 +1121,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
@@ -640,7 +1136,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
&temp_buff);
hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
@@ -649,8 +1145,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
for (i = 0; i < NTX_SCHED; ++i)
t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
&hw_sched_buff->ipg[i], true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
@@ -674,7 +1169,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
n = n / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -763,8 +1258,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
tp_pio->ireg_local_offset, true);
ch_tp_pio++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
@@ -776,7 +1270,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct ireg_buf *ch_sge_dbg;
int i, rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+ &temp_buff);
if (rc)
return rc;
@@ -797,8 +1292,7 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
sge_pio->ireg_local_offset);
ch_sge_dbg++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
@@ -810,7 +1304,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
struct cudbg_ulprx_la *ulprx_la_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
&temp_buff);
if (rc)
return rc;
@@ -818,8 +1312,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
ulprx_la_buff->size = ULPRX_LA_SIZE;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
@@ -832,15 +1325,39 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
int size, rc;
size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
+
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_meminfo *meminfo_buff;
+ int rc;
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+ rc = cudbg_fill_meminfo(padap, meminfo_buff);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
@@ -854,7 +1371,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_cim_pif_la) +
2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -863,8 +1380,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
(u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
NULL, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
@@ -880,7 +1396,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
&temp_buff);
if (rc)
return rc;
@@ -912,8 +1428,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
clk_info_buff->finwait2_timer =
tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
@@ -928,7 +1443,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -969,8 +1484,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
pcie_pio->ireg_local_offset);
ch_pcie++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
@@ -985,7 +1499,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1026,8 +1540,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
pm_pio->ireg_local_offset);
ch_pm++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
@@ -1041,7 +1554,8 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
u32 para[2], val[2];
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_tid_info_region_rev1),
&temp_buff);
if (rc)
return rc;
@@ -1053,6 +1567,12 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
sizeof(struct cudbg_ver_hdr);
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (!is_fw_attached(pdbg_init))
+ goto fill_tid;
+
#define FW_PARAM_PFVF_A(param) \
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
@@ -1064,7 +1584,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->uotid_base = val[0];
@@ -1083,13 +1603,16 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->hpftid_base = val[0];
tid->nhpftids = val[1] - val[0] + 1;
}
+#undef FW_PARAM_PFVF_A
+
+fill_tid:
tid->ntids = padap->tids.ntids;
tid->nstids = padap->tids.nstids;
tid->stid_base = padap->tids.stid_base;
@@ -1109,28 +1632,137 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
-#undef FW_PARAM_PFVF_A
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ u32 size, *value, j;
+ int i, rc, n;
+
+ size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ value = (u32 *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ for (j = t5_pcie_config_array[i][0];
+ j <= t5_pcie_config_array[i][1]; j += 4) {
+ t4_hw_pci_read_cfg4(padap, j, value);
+ value++;
+ }
+ }
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
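
Each {start, end} pair is walked inclusively in 4-byte steps, one config-space read per dword, and the dump buffer is sized by CUDBG_NUM_PCIE_CONFIG_REGS, so the walk must produce at most that many dwords. A standalone count over the t5_pcie_config_array values from this patch:

#include <stdio.h>

static const unsigned int ranges[][2] = {
	{0x0, 0x34}, {0x3c, 0x40}, {0x50, 0x64}, {0x70, 0x80},
	{0x94, 0xa0}, {0xb0, 0xb8}, {0xd0, 0xd4}, {0x100, 0x128},
	{0x140, 0x148}, {0x150, 0x164}, {0x170, 0x178}, {0x180, 0x194},
	{0x1a0, 0x1b8}, {0x1c0, 0x208},
};

int main(void)
{
	size_t i, n = sizeof(ranges) / sizeof(ranges[0]);
	unsigned int j, dwords = 0;

	for (i = 0; i < n; i++)
		for (j = ranges[i][0]; j <= ranges[i][1]; j += 4)
			dwords++;	/* one t4_hw_pci_read_cfg4() each */
	printf("%u dwords read\n", dwords);	/* must not exceed 0x61 */
	return 0;
}
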
-int cudbg_dump_context_size(struct adapter *padap)
+static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
+{
+ int index, bit, bit_pos = 0;
+
+ switch (type) {
+ case CTXT_EGRESS:
+ bit_pos = 176;
+ break;
+ case CTXT_INGRESS:
+ bit_pos = 141;
+ break;
+ case CTXT_FLM:
+ bit_pos = 89;
+ break;
+ }
+ index = bit_pos / 32;
+ bit = bit_pos % 32;
+ return buf[index] & (1U << bit);
+}
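
The validity check tests a fixed bit inside the raw context image; a standalone demo of the word/bit arithmetic for the three context types used above:

#include <stdio.h>

int main(void)
{
	const char *name[] = { "EGRESS", "INGRESS", "FLM" };
	int bit_pos[] = { 176, 141, 89 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%-7s bit %3d -> buf[%d] & (1U << %d)\n",
		       name[i], bit_pos[i], bit_pos[i] / 32,
		       bit_pos[i] % 32);
	return 0;
}
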
+
+static int cudbg_get_ctxt_region_info(struct adapter *padap,
+ struct cudbg_region_info *ctx_info,
+ u8 *mem_type)
{
- u32 value, size;
+ struct cudbg_mem_desc mem_desc;
+ struct cudbg_meminfo meminfo;
+ u32 i, j, value, found;
u8 flq;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ /* Get EGRESS and INGRESS context region size */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ found = 0;
+ memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
+ for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
+ rc = cudbg_get_mem_region(padap, &meminfo, j,
+ cudbg_region[i],
+ &mem_desc);
+ if (!rc) {
+ found = 1;
+ rc = cudbg_get_mem_relative(padap, &meminfo, j,
+ &mem_desc.base,
+ &mem_desc.limit);
+ if (rc) {
+ ctx_info[i].exist = false;
+ break;
+ }
+ ctx_info[i].exist = true;
+ ctx_info[i].start = mem_desc.base;
+ ctx_info[i].end = mem_desc.limit;
+ mem_type[i] = j;
+ break;
+ }
+ }
+ if (!found)
+ ctx_info[i].exist = false;
+ }
+ /* Get FLM and CNM max qid. */
value = t4_read_reg(padap, SGE_FLM_CFG_A);
/* Get number of data freelist queues */
flq = HDRSTARTFLQ_G(value);
- size = CUDBG_MAX_FL_QIDS >> flq;
+ ctx_info[CTXT_FLM].exist = true;
+ ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
- /* Add extra space for congestion manager contexts.
- * The number of CONM contexts are same as number of freelist
+ /* The number of CONM contexts is the same as the number of freelist
* queues.
*/
- size += size;
+ ctx_info[CTXT_CNM].exist = true;
+ ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
+
+ return 0;
+}
+
+int cudbg_dump_context_size(struct adapter *padap)
+{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+ u32 i, size = 0;
+ int rc;
+
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < CTXT_CNM; i++) {
+ if (!region_info[i].exist) {
+ if (i == CTXT_EGRESS || i == CTXT_INGRESS)
+ size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
+ SGE_CTXT_SIZE;
+ continue;
+ }
+
+ size += (region_info[i].end - region_info[i].start + 1) /
+ SGE_CTXT_SIZE;
+ }
return size * sizeof(struct cudbg_ch_cntxt);
}
@@ -1153,44 +1785,144 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
+static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
+ u8 ctxt_type,
+ struct cudbg_ch_cntxt **out_buff)
+{
+ struct cudbg_ch_cntxt *buff = *out_buff;
+ int rc;
+ u32 j;
+
+ for (j = 0; j < max_qid; j++) {
+ cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
+ rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = ctxt_type;
+ buff->cntxt_id = j;
+ buff++;
+ if (ctxt_type == CTXT_FLM) {
+ cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = j;
+ buff++;
+ }
+ }
+
+ *out_buff = buff;
+}
+
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
struct adapter *padap = pdbg_init->adap;
+ u32 j, size, max_ctx_size, max_ctx_qid;
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ch_cntxt *buff;
- u32 size, i = 0;
+ u64 *dst_off, *src_off;
+ u8 *ctx_buf;
+ u8 i, k;
int rc;
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
rc = cudbg_dump_context_size(padap);
if (rc <= 0)
return CUDBG_STATUS_ENTITY_NOT_FOUND;
size = rc;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
+ /* Get buffer with enough space to read the biggest context
+ * region in memory.
+ */
+ max_ctx_size = max(region_info[CTXT_EGRESS].end -
+ region_info[CTXT_EGRESS].start + 1,
+ region_info[CTXT_INGRESS].end -
+ region_info[CTXT_INGRESS].start + 1);
+
+ ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
+ if (!ctx_buf) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return -ENOMEM;
+ }
+
buff = (struct cudbg_ch_cntxt *)temp_buff.data;
- while (size > 0) {
- buff->cntxt_type = CTXT_FLM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
- buff->cntxt_type = CTXT_CNM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
+ /* Collect EGRESS and INGRESS context data.
+ * In case of failures, fall back to collecting via FW or
+ * backdoor access.
+ */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ if (!region_info[i].exist) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
- i++;
+ max_ctx_size = region_info[i].end - region_info[i].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (is_fw_attached(pdbg_init)) {
+ t4_sge_ctxt_flush(padap, padap->mbox, i);
+
+ rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
+ region_info[i].start, max_ctx_size,
+ (__be32 *)ctx_buf, 1);
+ }
+
+ if (rc || !is_fw_attached(pdbg_init)) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
+
+ for (j = 0; j < max_ctx_qid; j++) {
+ src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+ dst_off = (u64 *)buff->data;
+
+ /* The data is stored in 64-bit cpu order. Convert it
+ * to big endian before parsing.
+ */
+ for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
+ dst_off[k] = cpu_to_be64(src_off[k]);
+
+ rc = cudbg_sge_ctxt_check_valid(buff->data, i);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = i;
+ buff->cntxt_id = j;
+ buff++;
+ }
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ kvfree(ctx_buf);
+
+ /* Collect FREELIST and CONGESTION MANAGER contexts */
+ max_ctx_size = region_info[CTXT_FLM].end -
+ region_info[CTXT_FLM].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+ /* Since FLM and CONM are 1-to-1 mapped, the below function
+ * will fetch both FLM and CONM contexts.
+ */
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
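
The bulk-read path hands back context images in 64-bit CPU order and the validity parser expects big endian, hence the per-qword conversion above. A standalone demo of the same conversion; the SGE_CTXT_SIZE value is an assumption for the demo:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define SGE_CTXT_SIZE 24	/* assumption for the demo */

int main(void)
{
	uint64_t src[SGE_CTXT_SIZE / sizeof(uint64_t)] =
		{ 0x0123456789abcdefULL };
	uint64_t dst[SGE_CTXT_SIZE / sizeof(uint64_t)];
	size_t k;

	for (k = 0; k < SGE_CTXT_SIZE / sizeof(uint64_t); k++)
		dst[k] = htobe64(src[k]);	/* cpu_to_be64() equivalent */
	printf("%016llx -> %016llx\n",
	       (unsigned long long)src[0], (unsigned long long)dst[0]);
	return 0;
}
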
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -1228,9 +1960,10 @@ static void cudbg_mps_rpl_backdoor(struct adapter *padap,
mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}
-static int cudbg_collect_tcam_index(struct adapter *padap,
+static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
struct cudbg_mps_tcam *tcam, u32 idx)
{
+ struct adapter *padap = pdbg_init->adap;
u64 tcamy, tcamx, val;
u32 ctl, data2;
int rc = 0;
@@ -1315,12 +2048,22 @@ static int cudbg_collect_tcam_index(struct adapter *padap,
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
FW_LDST_CMD_IDX_V(idx));
- rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
- if (rc)
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (is_fw_attached(pdbg_init))
+ rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+
+ if (rc || !is_fw_attached(pdbg_init)) {
cudbg_mps_rpl_backdoor(padap, &mps_rplc);
- else
+ /* Ignore the error since we collected directly by
+ * reading registers.
+ */
+ rc = 0;
+ } else {
mps_rplc = ldst_cmd.u.mps.rplc;
+ }
tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
@@ -1351,16 +2094,16 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
n = padap->params.arch.mps_tcam_size;
size = sizeof(struct cudbg_mps_tcam) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tcam = (struct cudbg_mps_tcam *)temp_buff.data;
for (i = 0; i < n; i++) {
- rc = cudbg_collect_tcam_index(padap, tcam, i);
+ rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
total_size += sizeof(struct cudbg_mps_tcam);
@@ -1370,11 +2113,10 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
if (!total_size) {
rc = CUDBG_SYSTEM_ERROR;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
@@ -1425,7 +2167,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
&temp_buff);
if (rc)
return rc;
@@ -1441,8 +2183,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
@@ -1593,7 +2334,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
size += sizeof(struct cudbg_tcam);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1605,7 +2346,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
rc = cudbg_read_tid(pdbg_init, i, tid_data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -1616,8 +2357,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
bytes += sizeof(struct cudbg_tid_data);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
@@ -1630,13 +2370,12 @@ int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
int rc;
size = sizeof(u16) * NMTUS * NCCTRL_WIN;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
t4_read_cong_tbl(padap, (void *)temp_buff.data);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
@@ -1654,7 +2393,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1690,8 +2429,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
}
ma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
@@ -1704,7 +2442,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
u32 i, j;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
@@ -1725,8 +2463,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
@@ -1735,13 +2472,23 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ u32 local_offset, local_range;
struct ireg_buf *up_cim;
+ u32 size, j, iter;
+ u32 instance = 0;
int i, rc, n;
- u32 size;
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(padap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else
+ return CUDBG_STATUS_NOT_IMPLEMENTED;
+
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1757,6 +2504,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t5_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t5_up_cim_reg_array[i][3];
+ instance = t5_up_cim_reg_array[i][4];
} else if (is_t6(padap->params.chip)) {
up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
@@ -1764,18 +2512,39 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t6_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t6_up_cim_reg_array[i][3];
+ instance = t6_up_cim_reg_array[i][4];
}
- rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
- up_cim_reg->ireg_offset_range, buff);
- if (rc) {
- cudbg_put_buff(&temp_buff, dbg_buff);
- return rc;
+ switch (instance) {
+ case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x120;
+ local_range = 1;
+ break;
+ case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x10;
+ local_range = 1;
+ break;
+ default:
+ iter = 1;
+ local_offset = 0;
+ local_range = up_cim_reg->ireg_offset_range;
+ break;
+ }
+
+ for (j = 0; j < iter; j++, buff++) {
+ rc = t4_cim_read(padap,
+ up_cim_reg->ireg_local_offset +
+ (j * local_offset), local_range, buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
up_cim++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
@@ -1788,7 +2557,8 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
int i, rc;
u32 addr;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_pbt_tables),
&temp_buff);
if (rc)
return rc;
@@ -1801,7 +2571,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_dynamic[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1814,7 +2584,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_static[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1826,7 +2596,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->lrf_table[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1838,12 +2608,11 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_data[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
@@ -1864,7 +2633,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
log = padap->mbox_log;
mbox_cmds = padap->mbox_log->size;
size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1887,8 +2656,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
}
mboxlog++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
@@ -1906,7 +2674,7 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1924,6 +2692,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
hma_fli->ireg_local_offset);
hma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
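Every collector in this file now threads pdbg_init through cudbg_get_buff()/cudbg_put_buff() and finishes by returning the status of cudbg_write_and_release_buff(), which can fail once compression is involved; hence the switch away from the old unconditional "return rc". A minimal sketch of what that final write-and-release step has to do, assuming cudbg_update_buff() is the uncompressed copy-out path (the real body lives in cudbg_common.c, outside this diff):

/* Hedged sketch, not the driver's implementation. */
static int example_write_and_release(struct cudbg_init *pdbg_init,
                                     struct cudbg_buffer *pin_buff,
                                     struct cudbg_buffer *dbg_buff)
{
        int rc = 0;

        if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE)
                cudbg_update_buff(pin_buff, dbg_buff); /* plain copy-out */
        else
                rc = cudbg_compress_buff(pdbg_init, pin_buff, dbg_buff);

        cudbg_put_buff(pdbg_init, pin_buff); /* scratch always released */
        return rc;
}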
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index caeee8e33e86..eebefe7cd18e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -102,6 +108,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
int cudbg_dump_context_size(struct adapter *padap);
-struct cudbg_tcam;
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff);
void cudbg_fill_le_tcam_info(struct adapter *padap,
struct cudbg_tcam *tcam_region);
#endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
index 24b33f28e548..8150ea85d6a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
@@ -26,6 +26,7 @@ enum cudbg_dump_type {
enum cudbg_compression_type {
CUDBG_COMPRESSION_NONE = 1,
+ CUDBG_COMPRESSION_ZLIB,
};
struct cudbg_hdr {
@@ -78,10 +79,11 @@ struct cudbg_error {
#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff);
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff);
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff);
void cudbg_update_buff(struct cudbg_buffer *pin_buff,
struct cudbg_buffer *pout_buff);
#endif /* __CUDBG_LIB_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
new file mode 100644
index 000000000000..25cc06d75cff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/zlib.h>
+
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_zlib.h"
+
+static int cudbg_get_compress_hdr(struct cudbg_buffer *pdbg_buff,
+ struct cudbg_buffer *pin_buff)
+{
+ if (pdbg_buff->offset + sizeof(struct cudbg_compress_hdr) >
+ pdbg_buff->size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_buff->data + pdbg_buff->offset;
+ pin_buff->offset = 0;
+ pin_buff->size = sizeof(struct cudbg_compress_hdr);
+ pdbg_buff->offset += sizeof(struct cudbg_compress_hdr);
+ return 0;
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ struct cudbg_buffer temp_buff = { 0 };
+ struct z_stream_s compress_stream;
+ struct cudbg_compress_hdr *c_hdr;
+ int rc;
+
+ /* Write compression header to output buffer before compression */
+ rc = cudbg_get_compress_hdr(pout_buff, &temp_buff);
+ if (rc)
+ return rc;
+
+ c_hdr = (struct cudbg_compress_hdr *)temp_buff.data;
+ c_hdr->compress_id = CUDBG_ZLIB_COMPRESS_ID;
+
+ memset(&compress_stream, 0, sizeof(struct z_stream_s));
+ compress_stream.workspace = pdbg_init->workspace;
+ rc = zlib_deflateInit2(&compress_stream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL, Z_DEFAULT_STRATEGY);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ compress_stream.next_in = pin_buff->data;
+ compress_stream.avail_in = pin_buff->size;
+ compress_stream.next_out = pout_buff->data + pout_buff->offset;
+ compress_stream.avail_out = pout_buff->size - pout_buff->offset;
+
+ rc = zlib_deflate(&compress_stream, Z_FINISH);
+ if (rc != Z_STREAM_END)
+ return CUDBG_SYSTEM_ERROR;
+
+ rc = zlib_deflateEnd(&compress_stream);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ c_hdr->compress_size = compress_stream.total_out;
+ c_hdr->decompress_size = pin_buff->size;
+ pout_buff->offset += compress_stream.total_out;
+
+ return 0;
+}
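A consumer has to undo this on the host side: each compressed chunk is a cudbg_compress_hdr followed by compress_size bytes of an ordinary zlib stream. A hypothetical userspace helper (not part of this patch; link with -lz, and mirror the header layout from cudbg_zlib.h below):

#include <stdint.h>
#include <stdlib.h>
#include <zlib.h>

#define CUDBG_ZLIB_COMPRESS_ID 17

struct cudbg_compress_hdr {             /* mirrors the kernel struct */
        uint32_t compress_id;
        uint64_t decompress_size;
        uint64_t compress_size;
        uint64_t rsvd[32];
};

/* Inflate one chunk; returns a malloc'd buffer of *out_len bytes, or NULL. */
static void *cudbg_inflate_chunk(const void *chunk, uint64_t *out_len)
{
        const struct cudbg_compress_hdr *hdr = chunk;
        const unsigned char *payload = (const unsigned char *)chunk + sizeof(*hdr);
        uLongf dst_len = hdr->decompress_size;
        unsigned char *out = malloc(dst_len);

        if (hdr->compress_id != CUDBG_ZLIB_COMPRESS_ID || !out) {
                free(out);
                return NULL;
        }
        /* uncompress() handles any zlib stream, including one deflated
         * with the reduced CUDBG_ZLIB_WIN_BITS window used above.
         */
        if (uncompress(out, &dst_len, payload, hdr->compress_size) != Z_OK) {
                free(out);
                return NULL;
        }
        *out_len = dst_len;
        return out;
}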
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
new file mode 100644
index 000000000000..60d23805dfc3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_ZLIB_H__
+#define __CUDBG_ZLIB_H__
+
+#include <linux/zlib.h>
+
+#define CUDBG_ZLIB_COMPRESS_ID 17
+#define CUDBG_ZLIB_WIN_BITS 12
+#define CUDBG_ZLIB_MEM_LVL 4
+
+struct cudbg_compress_hdr {
+ u32 compress_id;
+ u64 decompress_size;
+ u64 compress_size;
+ u64 rsvd[32];
+};
+
+static inline int cudbg_get_workspace_size(void)
+{
+ return zlib_deflate_workspacesize(CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL);
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff);
+#endif /* __CUDBG_ZLIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 351f4bf37ca9..9040e13ce4b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -84,7 +84,8 @@ enum {
MEM_EDC1,
MEM_MC,
MEM_MC0 = MEM_MC,
- MEM_MC1
+ MEM_MC1,
+ MEM_HMA,
};
enum {
@@ -318,6 +319,7 @@ struct vpd_params {
};
struct pci_params {
+ unsigned int vpd_cap_addr;
unsigned char speed;
unsigned char width;
};
@@ -826,12 +828,17 @@ struct vf_info {
unsigned char vf_mac_addr[ETH_ALEN];
unsigned int tx_rate;
bool pf_set_mac;
+ u16 vlan;
};
struct mbox_list {
struct list_head list;
};
+struct mps_encap_entry {
+ atomic_t refcnt;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -846,6 +853,10 @@ struct adapter {
enum chip_type chip;
int msg_enable;
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __be16 geneve_port;
+ u8 geneve_port_cnt;
struct adapter_params params;
struct cxgb4_virt_res vres;
@@ -875,7 +886,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ unsigned int rawf_start;
+ unsigned int rawf_cnt;
struct smt_data *smt;
+ struct mps_encap_entry *mps_encap;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -1317,6 +1331,7 @@ void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -1435,6 +1450,21 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
q->size = size;
}
+/**
+ * t4_is_inserted_mod_type - is this a plugged-in Firmware Module Type?
+ * @fw_mod_type: the Firmware Module Type
+ *
+ * Return whether the Firmware Module Type represents a real Transceiver
+ * Module/Cable Module Type which has been inserted.
+ */
+static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
+{
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
+ fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
@@ -1524,6 +1554,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
+unsigned int t4_chip_rss_size(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -1633,6 +1664,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
@@ -1665,7 +1702,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_update_port_info(struct port_info *pi);
int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
@@ -1708,6 +1745,9 @@ void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
@@ -1722,4 +1762,6 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 29cc625e9833..30485f9a598f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -18,11 +18,14 @@
#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
-#include "cudbg_entity.h"
+#include "cudbg_zlib.h"
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+ { CUDBG_MC0, cudbg_collect_mc0_meminfo },
+ { CUDBG_MC1, cudbg_collect_mc1_meminfo },
+ { CUDBG_HMA, cudbg_collect_hma_meminfo },
};
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -53,6 +56,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
{ CUDBG_TP_LA, cudbg_collect_tp_la },
+ { CUDBG_MEMINFO, cudbg_collect_meminfo },
{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
{ CUDBG_CLK, cudbg_collect_clk_info },
{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -60,6 +64,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
{ CUDBG_TID_INFO, cudbg_collect_tid },
+ { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
@@ -158,8 +163,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
len = cudbg_mbytes_to_bytes(len);
break;
+ case CUDBG_MC0:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM0_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ len = EXT_MEM0_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
+ case CUDBG_MC1:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM1_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
case CUDBG_RSS:
- len = RSS_NENTRIES * sizeof(u16);
+ len = t4_chip_rss_size(adap) * sizeof(u16);
break;
case CUDBG_RSS_VF_CONF:
len = adap->params.arch.vfcount *
@@ -201,6 +222,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TP_LA:
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
+ case CUDBG_MEMINFO:
+ len = sizeof(struct cudbg_meminfo);
+ break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -219,6 +243,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TID_INFO:
len = sizeof(struct cudbg_tid_info_region_rev1);
break;
+ case CUDBG_PCIE_CONFIG:
+ len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ break;
case CUDBG_DUMP_CONTEXT:
len = cudbg_dump_context_size(adap);
break;
@@ -248,7 +275,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ n = 0;
+ if (is_t5(adap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(adap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
len = sizeof(struct ireg_buf) * n;
break;
case CUDBG_PBT_TABLE:
@@ -264,6 +297,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct ireg_buf) * n;
}
break;
+ case CUDBG_HMA:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & HMA_MUX_F) {
+ /* In T6, there's no MC1. So, HMA shares MC1
+ * address space.
+ */
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
default:
break;
}
@@ -275,6 +319,7 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
u32 i, entity;
u32 len = 0;
+ u32 wsize;
if (flag & CXGB4_ETH_DUMP_HW) {
for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
@@ -290,6 +335,11 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
}
}
+ /* If compression is enabled, a smaller destination buffer is enough */
+ wsize = cudbg_get_workspace_size();
+ if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
+ len = CUDBG_DUMP_BUFF_SIZE;
+
return len;
}
@@ -298,22 +348,14 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
const struct cxgb4_collect_entity *e_arr,
u32 arr_size, void *buf, u32 *tot_size)
{
- struct adapter *adap = pdbg_init->adap;
struct cudbg_error cudbg_err = { 0 };
struct cudbg_entity_hdr *entity_hdr;
- u32 entity_size, i;
- u32 total_size = 0;
+ u32 i, total_size = 0;
int ret;
for (i = 0; i < arr_size; i++) {
const struct cxgb4_collect_entity *e = &e_arr[i];
- /* Skip entities that won't fit in output buffer */
- entity_size = cxgb4_get_entity_length(adap, e->entity);
- if (entity_size >
- pdbg_init->outbuf_size - *tot_size - total_size)
- continue;
-
entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
entity_hdr->entity_type = e->entity;
entity_hdr->start_offset = dbg_buff->offset;
@@ -339,16 +381,40 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
*tot_size += total_size;
}
+static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
+{
+ u32 workspace_size;
+
+ workspace_size = cudbg_get_workspace_size();
+ pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
+ workspace_size);
+ if (!pdbg_init->compress_buff)
+ return -ENOMEM;
+
+ pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
+ pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
+ CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
+ return 0;
+}
+
+static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
+{
+ if (pdbg_init->compress_buff)
+ vfree(pdbg_init->compress_buff);
+}
+
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag)
{
- struct cudbg_init cudbg_init = { 0 };
struct cudbg_buffer dbg_buff = { 0 };
u32 size, min_size, total_size = 0;
+ struct cudbg_init cudbg_init;
struct cudbg_hdr *cudbg_hdr;
+ int rc;
size = *buf_size;
+ memset(&cudbg_init, 0, sizeof(struct cudbg_init));
cudbg_init.adap = adap;
cudbg_init.outbuf = buf;
cudbg_init.outbuf_size = size;
@@ -365,7 +431,6 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
cudbg_hdr->chip_ver = adap->params.chip;
cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
- cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;
min_size = sizeof(struct cudbg_hdr) +
sizeof(struct cudbg_entity_hdr) *
@@ -373,6 +438,24 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
if (size < min_size)
return -ENOMEM;
+ rc = cudbg_get_workspace_size();
+ if (rc) {
+ /* Zlib available. So, use zlib deflate */
+ cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
+ rc = cudbg_alloc_compress_buff(&cudbg_init);
+ if (rc) {
+ /* Ignore error and continue without compression. */
+ dev_warn(adap->pdev_dev,
+ "Fail allocating compression buffer ret: %d. Continuing without compression.\n",
+ rc);
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ rc = 0;
+ }
+ } else {
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ }
+
+ cudbg_hdr->compress_type = cudbg_init.compress_type;
dbg_buff.offset += min_size;
total_size = dbg_buff.offset;
@@ -390,8 +473,12 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
buf,
&total_size);
+ cudbg_free_compress_buff(&cudbg_init);
cudbg_hdr->data_len = total_size;
- *buf_size = total_size;
+ if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
+ *buf_size = size;
+ else
+ *buf_size = total_size;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index c099b5aa2214..ce1ac9a1c878 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -20,8 +20,12 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#define CUDBG_DUMP_BUFF_SIZE (32 * 1024 * 1024) /* 32 MB */
+#define CUDBG_COMPRESS_BUFF_SIZE (4 * 1024 * 1024) /* 4 MB */
+
typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index cf471831ee71..2822bbff73e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -45,6 +45,10 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+#include "cudbg_lib.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1739,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
*/
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI */
- vniy = ((data2 & DATAVIDH2_F) << 23) |
+ vniy = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
dip_hit = data2 & DATADIPHIT_F;
} else {
@@ -1749,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
port_num = DATAPORTNUM_G(data2);
/* Read tcamx. Change the control param */
+ vnix = 0;
ctl |= CTLXYBITSEL_V(1);
t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
@@ -1757,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI mask */
- vnix = ((data2 & DATAVIDH2_F) << 23) |
+ vnix = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
}
} else {
@@ -1830,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
addr[1], addr[2], addr[3],
addr[4], addr[5],
(unsigned long long)mask,
- vniy, vnix, dip_hit ? 'Y' : 'N',
+ vniy, (vnix | vniy),
+ dip_hit ? 'Y' : 'N',
port_num,
(cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
@@ -2017,11 +2023,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx)
static int rss_open(struct inode *inode, struct file *file)
{
- int ret;
- struct seq_tab *p;
struct adapter *adap = inode->i_private;
+ int ret, nentries;
+ struct seq_tab *p;
- p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+ nentries = t4_chip_rss_size(adap);
+ p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
if (!p)
return -ENOMEM;
@@ -2664,10 +2671,14 @@ static const struct file_operations mem_debugfs_fops = {
static int tid_info_show(struct seq_file *seq, void *v)
{
+ unsigned int tid_start = 0;
struct adapter *adap = seq->private;
const struct tid_info *t = &adap->tids;
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+ if (chip > CHELSIO_T5)
+ tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
seq_printf(seq, "Connections in use: %u\n",
@@ -2679,8 +2690,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
if (sb) {
- seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
- adap->tids.hash_base,
+ seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+ sb - 1, adap->tids.hash_base,
t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
@@ -2705,7 +2716,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "Connections in use: %u\n",
atomic_read(&t->conns_in_use));
- seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+ seq_printf(seq, "TID range: %u..%u", tid_start,
+ tid_start + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
@@ -2794,18 +2806,6 @@ static const struct file_operations blocked_fl_fops = {
.llseek = generic_file_llseek,
};
-struct mem_desc {
- unsigned int base;
- unsigned int limit;
- unsigned int idx;
-};
-
-static int mem_desc_cmp(const void *a, const void *b)
-{
- return ((const struct mem_desc *)a)->base -
- ((const struct mem_desc *)b)->base;
-}
-
static void mem_region_show(struct seq_file *seq, const char *name,
unsigned int from, unsigned int to)
{
@@ -2819,250 +2819,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
static int meminfo_show(struct seq_file *seq, void *v)
{
static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
- "MC0:", "MC1:"};
- static const char * const region[] = {
- "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
- "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
- "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
- "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
- "RQUDP region:", "PBL region:", "TXPBL region:",
- "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
- "On-chip queues:"
- };
-
- int i, n;
- u32 lo, hi, used, alloc;
- struct mem_desc avail[4];
- struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ "MC0:", "MC1:", "HMA:"};
struct adapter *adap = seq->private;
+ struct cudbg_meminfo meminfo;
+ int i, rc;
- for (i = 0; i < ARRAY_SIZE(mem); i++) {
- mem[i].limit = 0;
- mem[i].idx = i;
- }
-
- /* Find and sort the populated memory ranges */
- i = 0;
- lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
- if (lo & EDRAM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
- avail[i].base = EDRAM0_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
- avail[i].idx = 0;
- i++;
- }
- if (lo & EDRAM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
- avail[i].base = EDRAM1_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
- avail[i].idx = 1;
- i++;
- }
-
- if (is_t5(adap->params.chip)) {
- if (lo & EXT_MEM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
- avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
- avail[i].idx = 3;
- i++;
- }
- if (lo & EXT_MEM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
- avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
- avail[i].idx = 4;
- i++;
- }
- } else {
- if (lo & EXT_MEM_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- avail[i].base = EXT_MEM_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
- avail[i].idx = 2;
- i++;
- }
- }
- if (!i) /* no memory available */
- return 0;
- sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
- (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
-
- /* the next few have explicit upper bounds */
- md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
- PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
- md++;
-
- md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
- PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
- md++;
-
- if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
- hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
- md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- } else {
- hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- md->base = t4_read_reg(adap,
- LE_DB_HASH_TBL_BASE_ADDR_A);
- }
- md->limit = 0;
- } else {
- md->base = 0;
- md->idx = ARRAY_SIZE(region); /* hide it */
- }
- md++;
-
-#define ulp_region(reg) do { \
- md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
- (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
-} while (0)
-
- ulp_region(RX_ISCSI);
- ulp_region(RX_TDDP);
- ulp_region(TX_TPT);
- ulp_region(RX_STAG);
- ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
- ulp_region(RX_PBL);
- ulp_region(TX_PBL);
-#undef ulp_region
- md->base = 0;
- md->idx = ARRAY_SIZE(region);
- if (!is_t4(adap->params.chip)) {
- u32 size = 0;
- u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
- u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
-
- if (is_t5(adap->params.chip)) {
- if (sge_ctrl & VFIFO_ENABLE_F)
- size = DBVFIFO_SIZE_G(fifo_size);
- } else {
- size = T6_DBVFIFO_SIZE_G(fifo_size);
- }
-
- if (size) {
- md->base = BASEADDR_G(t4_read_reg(adap,
- SGE_DBVFIFO_BADDR_A));
- md->limit = md->base + (size << 2) - 1;
- }
- }
-
- md++;
-
- md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
- md->limit = 0;
- md++;
- md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
- md->limit = 0;
- md++;
-
- md->base = adap->vres.ocq.start;
- if (adap->vres.ocq.size)
- md->limit = md->base + adap->vres.ocq.size - 1;
- else
- md->idx = ARRAY_SIZE(region); /* hide it */
- md++;
-
- /* add any address-space holes, there can be up to 3 */
- for (n = 0; n < i - 1; n++)
- if (avail[n].limit < avail[n + 1].base)
- (md++)->base = avail[n].limit;
- if (avail[n].limit)
- (md++)->base = avail[n].limit;
-
- n = md - mem;
- sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+ memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(adap, &meminfo);
+ if (rc)
+ return -ENXIO;
- for (lo = 0; lo < i; lo++)
- mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
- avail[lo].limit - 1);
+ for (i = 0; i < meminfo.avail_c; i++)
+ mem_region_show(seq, memory[meminfo.avail[i].idx],
+ meminfo.avail[i].base,
+ meminfo.avail[i].limit - 1);
seq_putc(seq, '\n');
- for (i = 0; i < n; i++) {
- if (mem[i].idx >= ARRAY_SIZE(region))
+ for (i = 0; i < meminfo.mem_c; i++) {
+ if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
continue; /* skip holes */
- if (!mem[i].limit)
- mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
- mem_region_show(seq, region[mem[i].idx], mem[i].base,
- mem[i].limit);
+ if (!meminfo.mem[i].limit)
+ meminfo.mem[i].limit =
+ i < meminfo.mem_c - 1 ?
+ meminfo.mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
+ meminfo.mem[i].base, meminfo.mem[i].limit);
}
seq_putc(seq, '\n');
- lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP RAM:", lo, hi);
-
- lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP Extmem2:", lo, hi);
+ mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
+ mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
+ meminfo.up_extmem2_hi);
- lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- PMRXMAXPAGE_G(lo),
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
- (lo & PMRXNUMCHN_F) ? 2 : 1);
+ meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
+ meminfo.rx_pages_data[2]);
- lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
- hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- PMTXMAXPAGE_G(lo),
- hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
- seq_printf(seq, "%u p-structs\n\n",
- t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
-
- for (i = 0; i < 4; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
+ meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+
+ seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+
+ for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
- for (i = 0; i < adap->params.arch.nchan; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap,
- MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ i, meminfo.port_used[i], meminfo.port_alloc[i]);
+
+ for (i = 0; i < adap->params.arch.nchan; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq,
"Loopback %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
+ i, meminfo.loopback_used[i],
+ meminfo.loopback_alloc[i]);
+
return 0;
}
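One note on the mps_tcam_show() fix earlier in this file: dropping the extra shift implies DATAVIDH2_F is a single-bit field already positioned at VNI bit 23, with DATAVIDH1 supplying bits 22:16 and VIDL bits 15:0; the old code shifted the already-placed bit out of range. The corrected 24-bit assembly, under that assumed layout:

/* Illustrative only; assumes the field positions described above. */
static inline u32 example_vni(u32 data2, u32 val)
{
        return (data2 & DATAVIDH2_F) |          /* VNI[23], already in place */
               (DATAVIDH1_G(data2) << 16) |     /* VNI[22:16] */
               VIDL_G(val);                     /* VNI[15:0] */
}

The same expression builds both vniy (the match value) and vnix (the mask half), and the dump now prints the effective mask as vnix | vniy.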
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index eb338212f5af..7852d98bad75 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
- port_type == FW_PORT_TYPE_KR_SFP28) {
+ port_type == FW_PORT_TYPE_KR_SFP28 ||
+ port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
@@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
break;
+ case FW_PORT_TYPE_KR_XLAUI:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+ break;
+
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
SET_LMM(50000baseSR2_Full);
@@ -1396,6 +1404,101 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static int cxgb4_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct port_info *pi = netdev_priv(dev);
+ u8 sff8472_comp, sff_diag_type, sff_rev;
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ if (!t4_is_inserted_mod_type(pi->mod_type))
+ return -EINVAL;
+
+ switch (pi->port_type) {
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSA:
+ case FW_PORT_TYPE_SFP28:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
+ SFF_8472_COMP_LEN, &sff8472_comp);
+ if (ret)
+ return ret;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
+ SFP_DIAG_TYPE_LEN, &sff_diag_type);
+ if (ret)
+ return ret;
+
+ if (!sff8472_comp || (sff_diag_type & 4)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+
+ case FW_PORT_TYPE_QSFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_CR2_QSFP:
+ case FW_PORT_TYPE_CR4_QSFP:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_REV_ADDR,
+ SFF_REV_LEN, &sff_rev);
+ /* For QSFP type ports, a revision value >= 3
+ * means the module is SFF-8636 compliant.
+ */
+ if (ret)
+ return ret;
+ if (sff_rev >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxgb4_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eprom, u8 *data)
+{
+ int ret = 0, offset = eprom->offset, len = eprom->len;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ memset(data, 0, eprom->len);
+ if (offset + len <= I2C_PAGE_SIZE)
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+
+ /* offset + len spans the 0xa0 and 0xa2 pages */
+ if (offset <= I2C_PAGE_SIZE) {
+ /* read 0xa0 page */
+ len = I2C_PAGE_SIZE - offset;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+ if (ret)
+ return ret;
+ offset = I2C_PAGE_SIZE;
+ /* Remaining bytes to be read from second page =
+ * Total length - bytes read from first page
+ */
+ len = eprom->len - len;
+ }
+ /* Read additional optical diagnostics from page 0xa2 if supported */
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
+ offset, len, &data[eprom->len - len]);
+}
+
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1430,6 +1533,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_dump = set_dump,
.get_dump_flag = get_dump_flag,
.get_dump_data = get_dump_data,
+ .get_module_info = cxgb4_get_module_info,
+ .get_module_eeprom = cxgb4_get_module_eeprom,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
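cxgb4_get_module_eeprom() above splits any read that straddles the first 256-byte page: bytes below I2C_PAGE_SIZE come from device address 0xa0, the remainder from the diagnostics device 0xa2. The same split as a standalone illustration (hypothetical helper; I2C_PAGE_SIZE assumed to be 256):

#include <stdio.h>

#define I2C_PAGE_SIZE 256       /* assumed value of the driver's macro */

static void split_eeprom_read(int offset, int len)
{
        if (offset + len <= I2C_PAGE_SIZE) {
                printf("0xa0: offset %d, len %d\n", offset, len);
                return;
        }
        if (offset <= I2C_PAGE_SIZE) {
                int first = I2C_PAGE_SIZE - offset;

                printf("0xa0: offset %d, len %d\n", offset, first);
                offset = I2C_PAGE_SIZE;
                len -= first;
        }
        printf("0xa2: offset %d, len %d\n", offset, len);
}

/* e.g. split_eeprom_read(200, 100): 56 bytes from 0xa0, then 44 from 0xa2. */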
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5980f308a253..3177b0c9bd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -439,19 +439,32 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
if (ftid >= t->nftids)
ftid = -1;
} else {
- ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
+ if (is_t6(adap->params.chip)) {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 1);
+ if (ftid < 0)
+ goto out_unlock;
+
+ /* this is only a lookup, keep the found region
+ * unallocated
+ */
+ bitmap_release_region(t->ftid_bmap, ftid, 1);
+ } else {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 2);
+ if (ftid < 0)
+ goto out_unlock;
- /* this is only a lookup, keep the found region unallocated */
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ bitmap_release_region(t->ftid_bmap, ftid, 2);
+ }
}
out_unlock:
spin_unlock_bh(&t->ftid_lock);
return ftid;
}
-static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
@@ -460,22 +473,31 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
return -EBUSY;
}
- if (family == PF_INET)
+ if (family == PF_INET) {
__set_bit(fidx, t->ftid_bmap);
- else
- bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
return 0;
}
-static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET)
+ if (family == PF_INET) {
__clear_bit(fidx, t->ftid_bmap);
- else
- bitmap_release_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
}
@@ -694,7 +716,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->smt)
cxgb4_smt_release(f->smt);
- if (f->fs.hash && f->fs.type)
+ if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
/* The zeroing of the filter rule below clears the filter valid,
@@ -1189,6 +1211,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
unsigned int max_fidx, fidx;
struct filter_entry *f;
u32 iconf;
@@ -1225,12 +1248,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
* insertion.
*/
if (fs->type == 0) { /* IPv4 */
- /* If our IPv4 filter isn't being written to a
- * multiple of four filter index and there's an IPv6
- * filter at the multiple of 4 base slot, then we
- * prevent insertion.
+ /* For T6, if our IPv4 filter isn't being written to a
+ * multiple-of-two filter index and there's an IPv6
+ * filter at the multiple-of-2 base slot, then we need
+ * to delete that IPv6 filter ...
+ * For adapters below T6, an IPv6 filter occupies 4 entries,
+ * so the filter to delete sits at the multiple-of-4 slot.
*/
- fidx = filter_id & ~0x3;
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
if (fidx != filter_id &&
adapter->tids.ftid_tab[fidx].fs.type) {
f = &adapter->tids.ftid_tab[fidx];
@@ -1242,23 +1271,42 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
}
}
} else { /* IPv6 */
- /* Ensure that the IPv6 filter is aligned on a
- * multiple of 4 boundary.
- */
- if (filter_id & 0x3) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
- return -EINVAL;
- }
+ if (chip_ver < CHELSIO_T6) {
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
- /* Check all except the base overlapping IPv4 filter slots. */
- for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ /* Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < filter_id + 4;
+ fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EBUSY;
+ }
+ }
+ } else {
+ /* For T6, with CLIP enabled, an IPv6 filter occupies
+ * only 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+ /* Check overlapping IPv4 filter slot */
+ fidx = filter_id + 1;
f = &adapter->tids.ftid_tab[fidx];
if (f->valid) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
- fidx);
- return -EINVAL;
+ pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
+ __func__, fidx);
+ return -EBUSY;
}
}
}
@@ -1272,16 +1320,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
fidx = filter_id + adapter->tids.ftid_base;
ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
if (ret)
return ret;
/* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
if (ret) {
/* Clear the bits we have set above */
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
return ret;
}
@@ -1291,6 +1341,17 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (f->valid)
clear_filter(adapter, f);
+ if (is_t6(adapter->params.chip) && fs->type &&
+ ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
+ IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
+ chip_ver);
+ return ret;
+ }
+ }
+
/* Convert the filter specification into our internal format.
* We copy the PF/VF specification into the Outer VLAN field
* here so the rest of the code -- including the interface to
@@ -1316,7 +1377,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
ret = set_filter_wr(adapter, filter_id);
if (ret) {
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
clear_filter(adapter, f);
}
@@ -1394,6 +1456,7 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
struct filter_entry *f;
unsigned int max_fidx;
int ret;
@@ -1419,7 +1482,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
if (f->valid) {
f->ctx = ctx;
cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET);
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
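The pattern running through this file: below T6 an IPv6 filter occupies a 4-aligned group of four slots, while on T6 (where CLIP holds the full IPv6 address) it needs only a 2-aligned pair. The base-slot rule the checks above enforce, as a one-line illustrative helper (not in the driver):

static inline int ipv6_filter_base_slot(int filter_id, bool is_t6_chip)
{
        return is_t6_chip ? (filter_id & ~0x1) : (filter_id & ~0x3);
}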
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05a4abfd5ec1..1ca2a39ed0f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <net/addrconf.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -101,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION;
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
static const struct pci_device_id cxgb4_pci_tbl[] = {
-#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+#define CXGB4_UNIFIED_PF 0x4
+
+#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
* called for both.
@@ -109,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION;
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
#define CH_PCI_ID_TABLE_ENTRY(devid) \
- {PCI_VDEVICE(CHELSIO, (devid)), 4}
+ {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
{ 0, } \
@@ -1673,7 +1676,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- return t4_sge_ctxt_flush(adap, adap->mbox);
+ return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -2604,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
}
#ifdef CONFIG_PCI_IOV
-static int dummy_open(struct net_device *dev)
+static int cxgb4_mgmt_open(struct net_device *dev)
{
/* Turn carrier off since we don't have to transmit anything on this
* interface.
@@ -2614,39 +2617,44 @@ static int dummy_open(struct net_device *dev)
}
/* Fill MAC address that will be assigned by the FW */
-static void fill_vf_station_mac_addr(struct adapter *adap)
+static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
- unsigned int i;
u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+ unsigned int i, vf, nvfs;
+ u16 a, b;
int err;
u8 *na;
- u16 a, b;
+ adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
+ PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
- if (!err) {
- na = adap->params.vpd.na;
- for (i = 0; i < ETH_ALEN; i++)
- hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
- hex2val(na[2 * i + 1]));
- a = (hw_addr[0] << 8) | hw_addr[1];
- b = (hw_addr[1] << 8) | hw_addr[2];
- a ^= b;
- a |= 0x0200; /* locally assigned Ethernet MAC address */
- a &= ~0x0100; /* not a multicast Ethernet MAC address */
- macaddr[0] = a >> 8;
- macaddr[1] = a & 0xff;
-
- for (i = 2; i < 5; i++)
- macaddr[i] = hw_addr[i + 1];
-
- for (i = 0; i < adap->num_vfs; i++) {
- macaddr[5] = adap->pf * 16 + i;
- ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
- }
+ if (err)
+ return;
+
+ na = adap->params.vpd.na;
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+
+ a = (hw_addr[0] << 8) | hw_addr[1];
+ b = (hw_addr[1] << 8) | hw_addr[2];
+ a ^= b;
+ a |= 0x0200; /* locally assigned Ethernet MAC address */
+ a &= ~0x0100; /* not a multicast Ethernet MAC address */
+ macaddr[0] = a >> 8;
+ macaddr[1] = a & 0xff;
+
+ for (i = 2; i < 5; i++)
+ macaddr[i] = hw_addr[i + 1];
+
+ for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
+ vf < nvfs; vf++) {
+ macaddr[5] = adap->pf * 16 + vf;
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
}
}
-static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
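cxgb4_mgmt_fill_vf_station_mac_addr() above synthesizes each VF MAC from the adapter's VPD network address: fold the leading bytes together, force the locally-administered bit, clear the multicast bit, and stamp the PF/VF pair into the last octet. The same derivation as a standalone sketch (hypothetical helper; the driver first converts the VPD's hex characters to bytes):

#include <stdint.h>

static void derive_vf_mac(const uint8_t na[6], int pf, int vf, uint8_t mac[6])
{
        uint16_t a = (na[0] << 8) | na[1];
        uint16_t b = (na[1] << 8) | na[2];

        a ^= b;
        a |= 0x0200;    /* locally assigned Ethernet MAC address */
        a &= ~0x0100;   /* not a multicast Ethernet MAC address */

        mac[0] = a >> 8;
        mac[1] = a & 0xff;
        mac[2] = na[3];
        mac[3] = na[4];
        mac[4] = na[5];
        mac[5] = pf * 16 + vf;  /* keeps up to 16 VFs per PF unique */
}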
@@ -2668,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return ret;
}
-static int cxgb_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
+static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2683,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
return 0;
}
-static int cxgb_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct port_info *pi = netdev_priv(dev);
unsigned int phy_port_id;
@@ -2695,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
return 0;
}
-static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
- int max_tx_rate)
+static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2775,7 +2783,30 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
return 0;
}
-#endif
+static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int ret;
+
+ if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
+ return -EPROTONOSUPPORT;
+
+ ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
+ if (!ret) {
+ adap->vfinfo[vf].vlan = vlan;
+ return 0;
+ }
+
+ dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
+ ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
+ return ret;
+}
+#endif /* CONFIG_PCI_IOV */
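
cxgb4_mgmt_set_vf_vlan() is reached through the standard ndo_set_vf_vlan path on the management interface, e.g. "ip link set mgmtpf0,4 vf 0 vlan 100" (the interface name here is illustrative). Only plain 802.1Q with a zero QoS field is accepted -- 802.1ad or a non-zero qos yields -EPROTONOSUPPORT -- and passing vlan 0 clears the ACL again, which is why the error message distinguishes "setting" from "clearing".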
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
@@ -2897,9 +2928,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
static int cxgb_setup_tc_flower(struct net_device *dev,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return cxgb4_tc_flower_replace(dev, cls_flower);
@@ -2915,9 +2943,6 @@ static int cxgb_setup_tc_flower(struct net_device *dev,
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
struct tc_cls_u32_offload *cls_u32)
{
- if (cls_u32->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -2943,7 +2968,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EINVAL;
}
- if (!tc_can_offload(dev))
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -2987,6 +3012,174 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int ret = 0, i;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!adapter->vxlan_port_cnt ||
+ adapter->vxlan_port != ti->port)
+ return; /* Invalid VxLAN destination port */
+
+ adapter->vxlan_port_cnt--;
+ if (adapter->vxlan_port_cnt)
+ return;
+
+ adapter->vxlan_port = 0;
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!adapter->geneve_port_cnt ||
+ adapter->geneve_port != ti->port)
+ return; /* Invalid GENEVE destination port */
+
+ adapter->geneve_port_cnt--;
+ if (adapter->geneve_port_cnt)
+ return;
+
+ adapter->geneve_port = 0;
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+ break;
+ default:
+ return;
+ }
+
+ /* Matchall mac entries can be deleted only after all tunnel ports
+ * are brought down or removed.
+ */
+ if (!adapter->rawf_cnt)
+ return;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ ret = t4_free_raw_mac_filt(adapter, pi->viid,
+ match_all_mac, match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
+ i);
+ return;
+ }
+ atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+ pi->port_id].refcnt);
+ }
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int i, ret;
+
+ if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ /* The add-port callback can be invoked with the same port for
+ * both IPv4 and IPv6. Do not disable offloading when the same
+ * port is added for both protocols and one of them is later
+ * removed.
+ */
+ if (adapter->vxlan_port_cnt &&
+ adapter->vxlan_port == ti->port) {
+ adapter->vxlan_port_cnt++;
+ return;
+ }
+
+ /* We will support only one VxLAN port */
+ if (adapter->vxlan_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->vxlan_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->vxlan_port = ti->port;
+ adapter->vxlan_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+ VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (adapter->geneve_port_cnt &&
+ adapter->geneve_port == ti->port) {
+ adapter->geneve_port_cnt++;
+ return;
+ }
+
+ /* We will support only one GENEVE port */
+ if (adapter->geneve_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->geneve_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->geneve_port = ti->port;
+ adapter->geneve_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
+ GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+ break;
+ default:
+ return;
+ }
+
+ /* Create a 'match all' MAC filter entry for the inner MAC,
+ * if the raw MAC interface is supported. Once the Linux kernel
+ * provides driver entry points for adding/deleting inner MAC
+ * addresses, we will remove this 'match all' entry and fall back
+ * to adding exact-match filters.
+ */
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+
+ ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+ be16_to_cpu(ti->port));
+ cxgb_del_udp_tunnel(netdev, ti);
+ return;
+ }
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ }
+}
+
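Since the hardware exposes a single MPS_RX_VXLAN_TYPE_A and a single MPS_RX_GENEVE_TYPE_A register, the two callbacks above collapse the kernel's per-socket add/del notifications into one refcounted port per tunnel type. The pattern in isolation -- a minimal sketch, with the register writes reduced to comments, and no claim to being driver API:

#include <stdbool.h>
#include <stdint.h>

/* One offloadable port per tunnel type; the count lets an IPv4/IPv6
 * pair share the port, so removing one family keeps the offload alive. */
struct tnl_slot {
	uint16_t port;		/* kept in network byte order in the driver */
	unsigned int cnt;
};

static bool tnl_add(struct tnl_slot *s, uint16_t port)
{
	if (s->cnt && s->port == port) {
		s->cnt++;		/* same port again: just count it */
		return true;
	}
	if (s->cnt)
		return false;		/* a different port is already offloaded */
	s->port = port;			/* here: program MPS_RX_*_TYPE_A */
	s->cnt = 1;
	return true;
}

static bool tnl_del(struct tnl_slot *s, uint16_t port)
{
	if (!s->cnt || s->port != port)
		return false;		/* unknown port */
	if (--s->cnt == 0)
		s->port = 0;		/* here: clear MPS_RX_*_TYPE_A */
	return true;
}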
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return features;
+
+ /* Check if hw supports offload for this packet */
+ if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+ return features;
+
+ /* Offload is not supported for this encapsulated packet */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
static netdev_features_t cxgb_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -3018,20 +3211,25 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
+ .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
+ .ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = dummy_open,
- .ndo_set_vf_mac = cxgb_set_vf_mac,
- .ndo_get_vf_config = cxgb_get_vf_config,
- .ndo_set_vf_rate = cxgb_set_vf_rate,
- .ndo_get_phys_port_id = cxgb_get_phys_port_id,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
};
#endif
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
@@ -3043,7 +3241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
- .get_drvinfo = get_drvinfo,
+ .get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
@@ -4759,7 +4957,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
}
#ifdef CONFIG_PCI_IOV
-static void dummy_setup(struct net_device *dev)
+static void cxgb4_mgmt_setup(struct net_device *dev)
{
dev->type = ARPHRD_NONE;
dev->mtu = 0;
@@ -4775,38 +4973,6 @@ static void dummy_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static int config_mgmt_dev(struct pci_dev *pdev)
-{
- struct adapter *adap = pci_get_drvdata(pdev);
- struct net_device *netdev;
- struct port_info *pi;
- char name[IFNAMSIZ];
- int err;
-
- snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
- dummy_setup);
- if (!netdev)
- return -ENOMEM;
-
- pi = netdev_priv(netdev);
- pi->adapter = adap;
- pi->tx_chan = adap->pf % adap->params.nports;
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adap->port[0] = netdev;
- pi->port_id = 0;
-
- err = register_netdev(adap->port[0]);
- if (err) {
- pr_info("Unable to register VF mgmt netdev %s\n", name);
- free_netdev(adap->port[0]);
- adap->port[0] = NULL;
- return err;
- }
- return 0;
-}
-
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
struct adapter *adap = pci_get_drvdata(pdev);
@@ -4818,7 +4984,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
/* Check if cxgb4 is the MASTER and fw is initialized */
if (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
dev_warn(&pdev->dev,
"cxgb4 driver needs to be MASTER to support SRIOV\n");
return -EOPNOTSUPP;
@@ -4830,46 +4996,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
if (current_vfs && pci_vfs_assigned(pdev)) {
dev_err(&pdev->dev,
"Cannot modify SR-IOV while VFs are assigned\n");
- num_vfs = current_vfs;
- return num_vfs;
+ return current_vfs;
}
-
- /* Disable SRIOV when zero is passed.
- * One needs to disable SRIOV before modifying it, else
- * stack throws the below warning:
- * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ /* Note that the upper-level code ensures that we're never called with
+ * a non-zero "num_vfs" when we already have VFs instantiated. But
+ * it never hurts to code defensively.
*/
+ if (num_vfs != 0 && current_vfs != 0)
+ return -EBUSY;
+
+ /* Nothing to do for no change. */
+ if (num_vfs == current_vfs)
+ return num_vfs;
+
+ /* Disable SRIOV when zero is passed. */
if (!num_vfs) {
pci_disable_sriov(pdev);
- if (adap->port[0]) {
- unregister_netdev(adap->port[0]);
- adap->port[0] = NULL;
- }
+ /* free VF Management Interface */
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+
/* free VF resources */
+ adap->num_vfs = 0;
kfree(adap->vfinfo);
adap->vfinfo = NULL;
- adap->num_vfs = 0;
- return num_vfs;
+ return 0;
}
- if (num_vfs != current_vfs) {
- err = pci_enable_sriov(pdev, num_vfs);
+ if (!current_vfs) {
+ struct fw_pfvf_cmd port_cmd, port_rpl;
+ struct net_device *netdev;
+ unsigned int pmask, port;
+ struct pci_dev *pbridge;
+ struct port_info *pi;
+ char name[IFNAMSIZ];
+ u32 devcap2;
+ u16 flags;
+ int pos;
+
+ /* If we want to instantiate Virtual Functions, then our
+ * parent bridge's PCI-E needs to support Alternative Routing
+ * ID (ARI) because our VFs will show up at function offset 8
+ * and above.
+ */
+ pbridge = pdev->bus->self;
+ pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
+ pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
+ !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
+ /* Our parent bridge does not support ARI so issue a
+ * warning and skip instantiating the VFs. They
+ * won't be reachable.
+ */
+ dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
+ pbridge->bus->number, PCI_SLOT(pbridge->devfn),
+ PCI_FUNC(pbridge->devfn));
+ return -ENOTSUPP;
+ }
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adap->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
+ err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
if (err)
return err;
+ pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
+ port = ffs(pmask) - 1;
+ /* Allocate VF Management Interface. */
+ snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
+ adap->pf);
+ netdev = alloc_netdev(sizeof(struct port_info),
+ name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
+ if (!netdev)
+ return -ENOMEM;
- adap->num_vfs = num_vfs;
- err = config_mgmt_dev(pdev);
- if (err)
+ pi = netdev_priv(netdev);
+ pi->adapter = adap;
+ pi->lport = port;
+ pi->tx_chan = port;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adap->port[0] = netdev;
+ pi->port_id = 0;
+
+ err = register_netdev(adap->port[0]);
+ if (err) {
+ pr_info("Unable to register VF mgmt netdev %s\n", name);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
return err;
+ }
+ /* Allocate and set up VF Information. */
+ adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
+ sizeof(struct vf_info), GFP_KERNEL);
+ if (!adap->vfinfo) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ return -ENOMEM;
+ }
+ cxgb4_mgmt_fill_vf_station_mac_addr(adap);
+ }
+ /* Instantiate the requested number of VFs. */
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ pr_info("Unable to instantiate %d VFs\n", num_vfs);
+ if (!current_vfs) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ kfree(adap->vfinfo);
+ adap->vfinfo = NULL;
+ }
+ return err;
}
- adap->vfinfo = kcalloc(adap->num_vfs,
- sizeof(struct vf_info), GFP_KERNEL);
- if (adap->vfinfo)
- fill_vf_station_mac_addr(adap);
+ adap->num_vfs = num_vfs;
return num_vfs;
}
-#endif
+#endif /* CONFIG_PCI_IOV */
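
With config_mgmt_dev() absorbed into cxgb4_iov_configure(), the whole VF lifecycle -- the ARI capability check on the parent bridge, port discovery via FW_PFVF_CMD, the management netdev, the vfinfo table and pci_enable_sriov() -- is driven by the standard .sriov_configure hook, i.e. by writes to the PF's sriov_numvfs sysfs attribute; writing 0 unwinds everything, which is also what remove_one() now relies on.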
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -4882,9 +5134,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
-#ifdef CONFIG_PCI_IOV
- u32 v, port_vec;
-#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4908,6 +5157,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+
+ adapter->regs = regs;
err = t4_wait_dev_ready(regs);
if (err < 0)
goto out_unmap_bar0;
@@ -4918,13 +5174,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
chip = get_chip_type(pdev, pl_rev);
func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->mbox = func;
+ adapter->pf = func;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+ pci_set_drvdata(pdev, adapter);
+
if (func != ent->driver_data) {
-#ifndef CONFIG_PCI_IOV
- iounmap(regs);
-#endif
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
+ return 0;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
@@ -4933,53 +5205,30 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
-
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto out_unmap_bar0;
- }
adap_idx++;
-
adapter->workq = create_singlethread_workqueue("cxgb4");
if (!adapter->workq) {
err = -ENOMEM;
goto out_free_adapter;
}
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto out_free_adapter;
- }
adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
-
- adapter->regs = regs;
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5002,9 +5251,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
- spin_lock_init(&adapter->mbox_lock);
-
- INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -5080,6 +5326,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
+
+ if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5273,58 +5523,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_fw_sge_queues(adapter);
return 0;
-sriov:
-#ifdef CONFIG_PCI_IOV
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto free_pci_region;
- }
-
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->regs = regs;
- adapter->adap_idx = adap_idx;
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto free_adapter;
- }
- spin_lock_init(&adapter->mbox_lock);
- INIT_LIST_HEAD(&adapter->mlist.list);
-
- v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
- &v, &port_vec);
- if (err < 0) {
- dev_err(adapter->pdev_dev, "Could not fetch port params\n");
- goto free_mbox_log;
- }
-
- adapter->params.nports = hweight32(port_vec);
- pci_set_drvdata(pdev, adapter);
- return 0;
-
-free_mbox_log:
- kfree(adapter->mbox_log);
- free_adapter:
- kfree(adapter);
- free_pci_region:
- iounmap(regs);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- return err;
-#else
- return 0;
-#endif
-
out_free_dev:
free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
@@ -5416,14 +5614,7 @@ static void remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_PCI_IOV
else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
+ cxgb4_iov_configure(adapter->pdev, 0);
}
#endif
}
@@ -5467,18 +5658,6 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->flags & FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
-#ifdef CONFIG_PCI_IOV
- else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- }
-#endif
}
static struct pci_driver cxgb4_driver = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index a452d5a1b0f3..36563364bae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -43,7 +43,7 @@
#define STATS_CHECK_PERIOD (HZ / 2)
-struct ch_tc_pedit_fields pedits[] = {
+static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
@@ -408,9 +408,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
} else if (is_tcf_gact_shot(a)) {
fs->action = FILTER_DROP;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out = __dev_get_by_index(dev_net(in),
- ifindex);
+ struct net_device *out = tcf_mirred_dev(a);
struct port_info *pi = netdev_priv(out);
fs->action = FILTER_SWITCH;
@@ -585,14 +583,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing */
} else if (is_tcf_mirred_egress_redirect(a)) {
struct adapter *adap = netdev2adap(dev);
- struct net_device *n_dev;
- unsigned int i, ifindex;
+ struct net_device *n_dev, *target_dev;
+ unsigned int i;
bool found = false;
- ifindex = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (ifindex == n_dev->ifindex) {
+ if (target_dev == n_dev) {
found = true;
break;
}
@@ -768,9 +766,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
rhashtable_walk_enter(&adap->flower_tbl, &iter);
do {
- flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(flower_entry))
- goto walk_stop;
+ rhashtable_walk_start(&iter);
while ((flower_entry = rhashtable_walk_next(&iter)) &&
!IS_ERR(flower_entry)) {
@@ -789,8 +785,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
spin_unlock(&flower_entry->lock);
}
}
-walk_stop:
+
rhashtable_walk_stop(&iter);
+
} while (flower_entry == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index cd0cd13a964d..ab174bcfbfb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -114,14 +114,14 @@ static int fill_action_fields(struct adapter *adap,
/* Re-direct to specified port in hardware. */
if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *n_dev;
- unsigned int i, index;
+ struct net_device *n_dev, *target_dev;
bool found = false;
+ unsigned int i;
- index = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (index == n_dev->ifindex) {
+ if (target_dev == n_dev) {
fs->action = FILTER_SWITCH;
fs->eport = i;
found = true;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6c7b0ac0b48b..6e310a0da7c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -761,12 +761,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
*/
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
- int hdrlen = skb_shinfo(skb)->gso_size ?
- sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ int hdrlen = 0;
- hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+ chip_ver > CHELSIO_T5) {
+ hdrlen = sizeof(struct cpl_tx_tnl_lso);
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else {
+ hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ }
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
@@ -779,10 +786,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
unsigned int flits;
- int hdrlen = is_eth_imm(skb);
+ int hdrlen = is_eth_imm(skb, chip_ver);
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -801,13 +809,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
- if (skb_shinfo(skb)->gso_size)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- else
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_tnl_lso);
+ else
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core);
+
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ flits += (hdrlen / sizeof(__be64));
+ } else {
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ }
return flits;
}
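
For intuition: a flit is 8 bytes and one SGE Tx descriptor holds 8 flits, so the computation above is small integer arithmetic over the WR/CPL sizes. A stand-alone sketch under those assumptions, mirroring the sgl_len() and flits_to_desc() helpers defined earlier in sge.c (the 8-flit T6 tunnel-LSO header below follows from the 16+32+16-byte struct sizes in this patch and t4fw_api.h):

#include <stdio.h>

/* Flits for a scatter/gather list covering n DMA addresses; pairs of
 * addresses share a length word, mirroring the driver's sgl_len(). */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/* One 64-byte Tx descriptor holds 8 flits. */
static unsigned int flits_to_desc(unsigned int n)
{
	return (n + 7) / 8;
}

int main(void)
{
	/* A GSO skb with 3 page fragments: linear buffer + 3 frags. */
	unsigned int flits = sgl_len(3 + 1);

	/* T6 tunnel LSO: fw_eth_tx_pkt_wr (16) + cpl_tx_tnl_lso (32)
	 * + cpl_tx_pkt_core (16) = 64 bytes = 8 flits. */
	flits += 8;

	printf("flits=%u -> descriptors=%u\n", flits, flits_to_desc(flits));
	return 0;
}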
@@ -818,9 +833,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
- return flits_to_desc(calc_tx_flits(skb));
+ return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
/**
@@ -1148,6 +1164,105 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
+/* Returns the tunnel type if the hardware supports offloading it.
+ * Called only for T5 and later chips.
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+ u8 l4_hdr = 0;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ struct port_info *pi = netdev_priv(skb->dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return tnl_type;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_UDP:
+ if (adapter->vxlan_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_VXLAN;
+ else if (adapter->geneve_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_GENEVE;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ return tnl_type;
+}
+
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ struct cpl_tx_tnl_lso *tnl_lso,
+ enum cpl_tx_tnl_lso_type tnl_type)
+{
+ u32 val;
+ int in_eth_xtra_len;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ bool v6 = (ip_hdr(skb)->version == 6);
+
+ val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+ CPL_TX_TNL_LSO_FIRST_F |
+ CPL_TX_TNL_LSO_LAST_F |
+ (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+ CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+ CPL_TX_TNL_LSO_IPLENSETOUT_F |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+ tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+ tnl_lso->IpIdOffsetOut = 0;
+
+ /* Get the tunnel header length */
+ val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+ in_eth_xtra_len = skb_inner_network_header(skb) -
+ skb_inner_mac_header(skb) - ETH_HLEN;
+
+ switch (tnl_type) {
+ case TX_TNL_TYPE_VXLAN:
+ case TX_TNL_TYPE_GENEVE:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+ htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+ CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+ break;
+ default:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+ break;
+ }
+
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+ htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+ CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+ tnl_lso->r1 = 0;
+
+ val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+ CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+ CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+ tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+ tnl_lso->IpIdOffset = htons(0);
+
+ tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+ tnl_lso->TCPSeqOffset = htonl(0);
+ tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
+
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1171,6 +1286,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
bool immediate = false;
int len, max_pkt_len;
bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int chip_ver;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1227,7 +1345,8 @@ out_free: dev_kfree_skb_any(skb);
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
- flits = calc_tx_flits(skb);
+ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ flits = calc_tx_flits(skb, chip_ver);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -1241,9 +1360,12 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb))
+ if (is_eth_imm(skb, chip_ver))
immediate = true;
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ tnl_type = cxgb_encap_offload_supported(skb);
+
if (!immediate &&
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
@@ -1269,33 +1391,58 @@ out_free: dev_kfree_skb_any(skb);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
+
+ if (tnl_type)
+ len += sizeof(*tnl_lso);
+ else
+ len += sizeof(*lso);
- len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
- else
- lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
+ if (tnl_type) {
+ struct iphdr *iph = ip_hdr(skb);
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+ cpl = (void *)(tnl_lso + 1);
+ /* Driver is expected to compute partial checksum that
+ * does not include the IP Total Length.
+ */
+ if (iph->version == 4) {
+ iph->check = 0;
+ iph->tot_len = 0;
+ iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+ iph->ihl));
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = hwcsum(adap->params.chip, skb);
+ } else {
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->c.len = htonl(skb->len);
+ else
+ lso->c.len =
+ htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ cpl = (void *)(lso + 1);
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip)
+ <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 375ef86a84da..047609ef0515 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -195,9 +195,11 @@ static void t4_report_fw_error(struct adapter *adap)
u32 pcie_fw;
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
- if (pcie_fw & PCIE_FW_ERR_F)
+ if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
+ adap->flags &= ~FW_OK;
+ }
}
/*
@@ -317,9 +319,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
* wait [for a while] till we're at the front [or bail out with an
* EBUSY] ...
*/
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_add_tail(&entry.list, &adap->mlist.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
delay_idx = 0;
ms = delay[0];
@@ -332,9 +334,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
*/
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -365,9 +367,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -418,9 +420,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
execute = i + ms;
t4_record_mbox(adap, cmd_rpl,
MBOX_LEN, access, execute);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -430,9 +432,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
t4_fatal_err(adap);
return ret;
}
@@ -524,11 +526,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* MEM_EDC1 = 1
* MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
* MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ * MEM_HMA = 4
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
- if (mtype != MEM_MC1)
+ if (mtype == MEM_HMA) {
+ memoffset = 2 * (edc_size * 1024 * 1024);
+ } else if (mtype != MEM_MC1) {
memoffset = (mtype * (edc_size * 1024 * 1024));
- else {
+ } else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
@@ -4923,6 +4928,14 @@ void t4_intr_disable(struct adapter *adapter)
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
}
+unsigned int t4_chip_rss_size(struct adapter *adap)
+{
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ return RSS_NENTRIES;
+ else
+ return T6_RSS_NENTRIES;
+}
+
/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
@@ -5061,10 +5074,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val)
*/
int t4_read_rss(struct adapter *adapter, u16 *map)
{
+ int i, ret, nentries;
u32 val;
- int i, ret;
- for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ nentries = t4_chip_rss_size(adapter);
+ for (i = 0; i < nentries / 2; ++i) {
ret = rd_rss_row(adapter, i, &val);
if (ret)
return ret;
@@ -5076,7 +5090,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap)
{
- return (adap->flags & FW_OK) || !adap->use_bd;
+ return (adap->flags & FW_OK) && !adap->use_bd;
}
/**
@@ -6071,6 +6085,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"CR2_QSFP",
"SFP28",
"KR_SFP28",
+ "KR_XLAUI"
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -6526,18 +6541,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
* t4_sge_ctxt_flush - flush the SGE context cache
* @adap: the adapter
* @mbox: mailbox to use for the FW command
+ * @ctx_type: Egress or Ingress
*
* Issues a FW command through the given mailbox to flush the
* SGE context cache.
*/
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
{
int ret;
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+ FW_LDST_ADDRSPC_SGE_EGRC :
+ FW_LDST_ADDRSPC_SGE_INGC);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
ldst_addrspace);
@@ -7451,6 +7469,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the MAC entry at the specified index using the raw MAC interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(val));
+
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the MAC entry at the specified index using the raw MAC interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+ /* Specify that this is an inner mac address */
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
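Note the return convention: on success the firmware echoes back the TCAM index it actually used, and any relocation (ret != idx) is deliberately turned into -ENOMEM. Callers such as cxgb_add_udp_tunnel() above recompute the index as rawf_start + port_id on both the allocate and free paths, so a filter landing at any other slot could never be freed again.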
+/**
* t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -8491,22 +8615,6 @@ found:
return 0;
}
-static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
-{
- u16 val;
- u32 pcie_cap;
-
- pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap) {
- pci_read_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, &val);
- val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
- val |= range;
- pci_write_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, val);
- }
-}
-
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -8592,8 +8700,9 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
- /* Set pci completion timeout value to 4 seconds. */
- set_pcie_completion_timeout(adapter, 0xd);
+ /* Set PCIe completion timeout to 4 seconds. */
+ pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
return 0;
}
@@ -9736,3 +9845,91 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
NULL, 1);
}
+
+/**
+ * t4_i2c_rd - read I2C data from adapter
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: Port number if per-port device; <0 if not
+ * @devid: per-port device ID or absolute device ID
+ * @offset: byte offset into device I2C space
+ * @len: byte length of I2C space data
+ * @buf: buffer in which to return I2C data
+ *
+ * Reads the I2C data from the indicated device and location.
+ */
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf)
+{
+ struct fw_ldst_cmd ldst_cmd, ldst_rpl;
+ unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
+ int ret = 0;
+
+ if (len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ /* Don't allow reads that span multiple pages */
+ if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
+ ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
+ ldst_cmd.u.i2c.did = devid;
+
+ while (len > 0) {
+ unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
+
+ ldst_cmd.u.i2c.boffset = offset;
+ ldst_cmd.u.i2c.blen = i2c_len;
+
+ ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_rpl);
+ if (ret)
+ break;
+
+ memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
+ offset += i2c_len;
+ buf += i2c_len;
+ len -= i2c_len;
+ }
+
+ return ret;
+}
+
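The I2C constants added to t4_hw.h in this same series pair naturally with this helper. A hedged sketch of a caller reading the SFP+ diagnostics-type byte -- error handling trimmed, and the surrounding adapter/port context (adap, pi) assumed from the usual driver scope:

	u8 diag_type;
	int ret;

	/* One byte at offset SFP_DIAG_TYPE_ADDR (0x5c) of the A0h page
	 * on this port's transceiver. */
	ret = t4_i2c_rd(adap, adap->mbox, pi->port_id, I2C_DEV_ADDR_A0,
			SFP_DIAG_TYPE_ADDR, SFP_DIAG_TYPE_LEN, &diag_type);
	if (!ret)
		dev_info(adap->pdev_dev, "SFP diag type: %#x\n", diag_type);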
+/**
+ * t4_set_vlan_acl - Set a VLAN id for the specified VF
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @vf: one of the VFs instantiated by the specified PF
+ * @vlan: the VLAN id to be set
+ */
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan)
+{
+ struct fw_acl_vlan_cmd vlan_cmd;
+ unsigned int enable;
+
+ enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
+ memset(&vlan_cmd, 0, sizeof(vlan_cmd));
+ vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F |
+ FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
+ FW_ACL_VLAN_CMD_VFN_V(vf));
+ vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
+ /* Drop all packets that do not match the VLAN id */
+ vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
+ if (enable != 0) {
+ vlan_cmd.nvlan = 1;
+ vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
+ }
+
+ return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index a964ed184356..361d5032c288 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -38,21 +38,22 @@
#include <linux/types.h>
enum {
- NCHAN = 4, /* # of HW channels */
- MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
- EEPROMSIZE = 17408, /* Serial EEPROM physical size */
- EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
- EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
- RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
- TCB_SIZE = 128, /* TCB size */
- NMTUS = 16, /* size of MTU table */
- NCCTRL_WIN = 32, /* # of congestion control windows */
- NTX_SCHED = 8, /* # of HW Tx scheduling queues */
- PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7, /* # of PM stats in T6 */
- MBOX_LEN = 64, /* mailbox size in bytes */
- TRACE_LEN = 112, /* length of trace data and mask */
- FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
+ PM_NSTATS = 5, /* # of PM stats */
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
};
enum {
@@ -70,7 +71,9 @@ enum {
/* SGE context types */
enum ctxt_type {
- CTXT_FLM = 2,
+ CTXT_EGRESS,
+ CTXT_INGRESS,
+ CTXT_FLM,
CTXT_CNM,
};
@@ -284,4 +287,14 @@ enum {
#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+#define I2C_PAGE_SIZE 0x100
+#define SFP_DIAG_TYPE_ADDR 0x5c
+#define SFP_DIAG_TYPE_LEN 0x1
+#define SFF_8472_COMP_ADDR 0x5e
+#define SFF_8472_COMP_LEN 0x1
+#define SFF_REV_ADDR 0x1
+#define SFF_REV_LEN 0x1
+
#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f241145b..d0db4427b77e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+enum cpl_tx_tnl_lso_type {
+ TX_TNL_TYPE_OPAQUE,
+ TX_TNL_TYPE_NVGRE,
+ TX_TNL_TYPE_VXLAN,
+ TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+ __be32 op_to_IpIdSplitOut;
+ __be16 IpIdOffsetOut;
+ __be16 UdpLenSetOut_to_TnlHdrLen;
+ __be64 r1;
+ __be32 Flow_to_TcpHdrLen;
+ __be16 IpIdOffset;
+ __be16 IpIdSplit_to_Mss;
+ __be32 TCPSeqOffset;
+ __be32 EthLenOffset_Size;
+ /* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S 24
+#define CPL_TX_TNL_LSO_OPCODE_M 0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S 23
+#define CPL_TX_TNL_LSO_FIRST_M 0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S 22
+#define CPL_TX_TNL_LSO_LAST_M 0x1
+#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+ CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S 20
+#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S 4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S 0
+#define CPL_TX_TNL_LSO_MSS_M 0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S 0
+#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+ CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+ CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S 12
+#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S 20
+#define CPL_TX_TNL_LSO_IPV6_M 0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U)
+
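All of the above follow the driver's uniform _S/_M/_V/_G shift/mask convention, so composing and decoding a word never needs open-coded masks. For instance (values purely illustrative):

	/* Pack: opcode plus FIRST/LAST flags into op_to_IpIdSplitOut. */
	u32 val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
		  CPL_TX_TNL_LSO_FIRST_F |
		  CPL_TX_TNL_LSO_LAST_F;

	/* Unpack: _G() shifts down and masks, undoing _V(). */
	u8 opcode = CPL_TX_TNL_LSO_OPCODE_G(val);	/* 0xEC, CPL_TX_TNL_LSO */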
#define ULP_TX_SC_MORE_S 23
#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60cf9e02de5d..51b18035d691 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
/* T6 adapters:
*/
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a7cfece72828..a6df73398d17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -45,6 +45,9 @@
#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
#define MYPORT_BASE 0x1c000
#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
@@ -961,6 +964,10 @@
#define MA_EXT_MEMORY1_BAR_A 0x7808
+#define HMA_MUX_S 5
+#define HMA_MUX_V(x) ((x) << HMA_MUX_S)
+#define HMA_MUX_F HMA_MUX_V(1U)
+
#define EXT_MEM1_BASE_S 16
#define EXT_MEM1_BASE_M 0xfffU
#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
@@ -2504,6 +2511,28 @@
#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S 16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F VXLAN_EN_V(1U)
+
+#define VXLAN_S 0
+#define VXLAN_M 0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
+#define MPS_RX_GENEVE_TYPE_A 0x11238
+
+#define GENEVE_EN_S 16
+#define GENEVE_EN_V(x) ((x) << GENEVE_EN_S)
+#define GENEVE_EN_F GENEVE_EN_V(1U)
+
+#define GENEVE_S 0
+#define GENEVE_M 0xffffU
+#define GENEVE_V(x) ((x) << GENEVE_S)
+#define GENEVE_G(x) (((x) >> GENEVE_S) & GENEVE_M)
+
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2530,8 +2559,14 @@
#define DATAPORTNUM_S 12
#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
#define DATADIPHIT_S 8
#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
#define DATADIPHIT_F DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index be3658301832..0d83b4064a78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -835,6 +835,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_MPS = 0x0020,
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+ FW_LDST_ADDRSPC_I2C = 0x0038,
};
enum fw_ldst_mps_fid {
@@ -2066,6 +2067,7 @@ struct fw_vi_cmd {
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
#define FW_CLS_TCAM_NUM_ENTRIES 336
enum fw_vi_mac_smac {
@@ -2082,6 +2084,13 @@ enum fw_vi_mac_result {
FW_VI_MAC_R_F_ACL_CHECK
};
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_EXACTMAC,
+ FW_VI_MAC_TYPE_HASHVEC,
+ FW_VI_MAC_TYPE_RAW,
+ FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
struct fw_vi_mac_cmd {
__be32 op_to_viid;
__be32 freemacs_to_len16;
@@ -2093,6 +2102,13 @@ struct fw_vi_mac_cmd {
struct fw_vi_mac_hash {
__be64 hashvec;
} hash;
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
} u;
};
@@ -2102,6 +2118,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_FREEMACS_S 31
#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
#define FW_VI_MAC_CMD_HASHVECEN_S 23
#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
@@ -2128,6 +2150,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
+#define FW_VI_MAC_CMD_RAW_IDX_S 16
+#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
@@ -2332,14 +2360,22 @@ struct fw_acl_vlan_cmd {
#define FW_ACL_VLAN_CMD_VFN_S 0
#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S)
-#define FW_ACL_VLAN_CMD_EN_S 31
-#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_S 31
+#define FW_ACL_VLAN_CMD_EN_M 0x1
+#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_EN_S) & FW_ACL_VLAN_CMD_EN_M)
+#define FW_ACL_VLAN_CMD_EN_F FW_ACL_VLAN_CMD_EN_V(1U)
#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7
#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
-#define FW_ACL_VLAN_CMD_FM_S 6
-#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_S 6
+#define FW_ACL_VLAN_CMD_FM_M 0x1
+#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_FM_S) & FW_ACL_VLAN_CMD_FM_M)
+#define FW_ACL_VLAN_CMD_FM_F FW_ACL_VLAN_CMD_FM_V(1U)
/* old 16-bit port capabilities bitmap (fw_port_cap16_t) */
enum fw_port_cap {
@@ -2835,6 +2871,7 @@ enum fw_port_type {
FW_PORT_TYPE_CR2_QSFP,
FW_PORT_TYPE_SFP28,
FW_PORT_TYPE_KR_SFP28,
+ FW_PORT_TYPE_KR_XLAUI,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 08c6ddb84a04..5883f09e3804 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -92,6 +92,7 @@ struct sge_rspq;
*/
struct port_info {
struct adapter *adapter; /* our adapter */
+ u32 vlan_id; /* vlan id for VST */
u16 viid; /* virtual interface ID */
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b48361cfdc78..b7e79e64d2ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -791,6 +791,8 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
goto err_unwind;
+ pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
+
netif_tx_start_all_queues(dev);
set_bit(pi->port_id, &adapter->open_device_map);
return 0;
@@ -1229,7 +1231,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
- port_type == FW_PORT_TYPE_KR_SFP28) {
+ port_type == FW_PORT_TYPE_KR_SFP28 ||
+ port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
@@ -1323,6 +1326,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
SET_LMM(25000baseKR_Full);
break;
+ case FW_PORT_TYPE_KR_XLAUI:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+ break;
+
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
SET_LMM(50000baseSR2_Full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e673c656..dfce5df7538e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1202,6 +1202,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(qidx >= pi->nqsets);
txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+ if (pi->vlan_id && !skb_vlan_tag_present(skb))
+ __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+ pi->vlan_id);
+
/*
* Take this opportunity to reclaim any TX Descriptors whose DMA
* transfers have completed.
@@ -1570,6 +1574,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
{
struct adapter *adapter = rxq->rspq.adapter;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
int ret;
struct sk_buff *skb;
@@ -1586,8 +1591,9 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ pi = netdev_priv(skb->dev);
- if (pkt->vlan_ex) {
+ if (pkt->vlan_ex && !pi->vlan_id) {
__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
be16_to_cpu(pkt->vlan));
rxq->stats.vlan_ex++;
@@ -1620,6 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
struct adapter *adapter = rspq->adapter;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
/*
* If this is a good TCP packet and we have Generic Receive Offload
@@ -1644,6 +1651,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
__skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
+ pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
if (csum_ok && !pkt->err_vec &&
@@ -1660,9 +1668,10 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
} else
skb_checksum_none_assert(skb);
- if (pkt->vlan_ex) {
+ if (pkt->vlan_ex && !pi->vlan_id) {
rxq->stats.vlan_ex++;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(pkt->vlan));
}
netif_receive_skb(skb);
@@ -2619,8 +2628,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
/*
@@ -2628,9 +2637,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2662,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
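
The free-list sizing logic above accepts the small buffer only if it equals PAGE_SIZE, zeroes out a "large" buffer that would not actually be larger, and requires any remaining large buffer to be a power of two before deriving its page order. A userspace restatement of that arithmetic; PAGE_SHIFT of 12 is assumed and ilog2_u32() is a local stand-in for the kernel's ilog2():

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static int ilog2_u32(uint32_t v)
{
        int r = -1;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        uint32_t fl_small_pg = PAGE_SIZE, fl_large_pg = 65536;

        if (fl_large_pg <= fl_small_pg)
                fl_large_pg = 0;
        if (fl_small_pg != PAGE_SIZE ||
            (fl_large_pg & (fl_large_pg - 1)) != 0) {
                printf("bad SGE FL buffer sizes [%u, %u]\n",
                       fl_small_pg, fl_large_pg);
                return 1;
        }
        if (fl_large_pg)
                printf("fl_pg_order = %d\n",
                       ilog2_u32(fl_large_pg) - PAGE_SHIFT);
        /* prints: fl_pg_order = 4 (65536 bytes = 16 pages of 4 KiB) */
        return 0;
}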
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9cf9c56b0f73..712e8f0c71b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -413,5 +413,6 @@ int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
int t4vf_prep_adapter(struct adapter *);
int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
unsigned int *naddr, u8 *addr);
+int t4vf_get_vf_vlan_acl(struct adapter *adapter);
#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 67aec59a14e6..798695bf8678 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -2147,3 +2147,31 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
return ret;
}
+
+/**
+ * t4vf_get_vf_vlan_acl - Get the VLAN ID to be assigned to
+ * the VI of this VF.
+ * @adapter: The adapter
+ *
+ * Find the VLAN ID to be assigned to the VF's VI. The requested VLAN
+ * ID comes from the host OS via a callback in the PF driver.
+ */
+int t4vf_get_vf_vlan_acl(struct adapter *adapter)
+{
+ struct fw_acl_vlan_cmd cmd;
+ int vlan = 0;
+ int ret = 0;
+
+ cmd.op_to_vfn = htonl(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+
+ /* Note: Do not enable the ACL */
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
+
+ if (!ret)
+ vlan = be16_to_cpu(cmd.vlanid[0]);
+
+ return vlan;
+}
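
Taken together, the VST plumbing in these cxgb4vf hunks reduces to two predicates: on transmit, tag frames that carry no VLAN tag of their own with the administratively assigned VLAN, and on receive, do not expose the hardware-stripped tag to the stack while that VLAN is in force. A toy model of just those two decisions; struct vf_port and the bool parameters are simplifications, not the driver's types:

#include <stdbool.h>
#include <stdio.h>

struct vf_port {
        unsigned short vlan_id; /* 0 means no VST VLAN assigned */
};

/* TX: mirror of "pi->vlan_id && !skb_vlan_tag_present(skb)" */
static bool should_insert_vst_tag(const struct vf_port *pi, bool skb_tagged)
{
        return pi->vlan_id && !skb_tagged;
}

/* RX: mirror of "pkt->vlan_ex && !pi->vlan_id" */
static bool should_expose_rx_tag(const struct vf_port *pi, bool pkt_vlan_ex)
{
        return pkt_vlan_ex && !pi->vlan_id;
}

int main(void)
{
        struct vf_port pi = { .vlan_id = 100 };

        printf("tx insert tag: %d\n", should_insert_vst_tag(&pi, false)); /* 1 */
        printf("rx expose tag: %d\n", should_expose_rx_tag(&pi, true));   /* 0 */
        return 0;
}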
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 6a9527004cb1..9b218f0e5a4c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_NAPI_BUDGET 256
+
#define ENIC_AIC_LARGE_PKT_DIFF 3
struct enic_msix_entry {
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 462d0ce51240..efb9333c7cf8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
#include "enic_res.h"
#include "enic.h"
@@ -578,6 +579,16 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
return __enic_set_rsskey(enic);
}
+static int enic_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ return 0;
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
@@ -597,6 +608,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
.get_link_ksettings = enic_get_ksettings,
+ .get_ts_info = enic_get_ts_info,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e130fb757e7b..f202ba72a811 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -856,6 +856,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
+ skb_tx_timestamp(skb);
if (!skb->xmit_more || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
@@ -1499,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int cq_wq = enic_cq_wq(enic, 0);
unsigned int intr = enic_legacy_io_intr();
unsigned int rq_work_to_do = budget;
- unsigned int wq_work_to_do = -1; /* no limit */
+ unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
@@ -1597,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
struct vnic_wq *wq = &enic->wq[wq_index];
unsigned int cq;
unsigned int intr;
- unsigned int wq_work_to_do = -1; /* clean all desc possible */
+ unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int wq_work_done;
unsigned int wq_irq;
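
The two enic hunks above replace an unlimited TX-completion budget (-1, i.e. UINT_MAX once assigned to an unsigned int) with ENIC_WQ_NAPI_BUDGET, so one NAPI poll can no longer spin across an arbitrarily deep completion queue. A toy loop showing the effect of the cap; service_cq() is a made-up stand-in for the driver's completion-queue service routine:

#include <stdio.h>

static unsigned int service_cq(unsigned int pending, unsigned int budget)
{
        unsigned int done = 0;

        while (pending && done < budget) {      /* budget bounds poll latency */
                pending--;
                done++;
        }
        return done;
}

int main(void)
{
        printf("serviced %u of 10000 entries\n", service_cq(10000, 256));
        /* prints: serviced 256 of 10000 entries */
        return 0;
}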
diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig
new file mode 100644
index 000000000000..89bc4579724d
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Cortina ethernet devices
+
+config NET_VENDOR_CORTINA
+ bool "Cortina Gemini devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+if NET_VENDOR_CORTINA
+
+config GEMINI_ETHERNET
+ tristate "Gemini Gigabit Ethernet support"
+ depends on OF
+ depends on HAS_IOMEM
+ select PHYLIB
+ select CRC32
+ ---help---
+ This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet.
+
+endif # NET_VENDOR_CORTINA
diff --git a/drivers/net/ethernet/cortina/Makefile b/drivers/net/ethernet/cortina/Makefile
new file mode 100644
index 000000000000..4e86d398a89c
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Cortina Gemini network device drivers.
+
+obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
new file mode 100644
index 000000000000..5eb999af2c40
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -0,0 +1,2593 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Ethernet device driver for Cortina Systems Gemini SoC
+ * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
+ * Net Engine and Gigabit Ethernet MAC (GMAC)
+ * This hardware contains a TCP Offload Engine (TOE) but currently the
+ * driver does not make use of it.
+ *
+ * Authors:
+ * Linus Walleij <linus.walleij@linaro.org>
+ * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
+ * Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Paulius Zaleckas <paulius.zaleckas@gmail.com>
+ * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
+ * Gary Chen & Ch Hsu Storlink Semiconductor
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include "gemini.h"
+
+#define DRV_NAME "gmac-gemini"
+#define DRV_VERSION "1.0"
+
+#define HSIZE_8 0x00
+#define HSIZE_16 0x01
+#define HSIZE_32 0x02
+
+#define HBURST_SINGLE 0x00
+#define HBURST_INCR 0x01
+#define HBURST_INCR4 0x02
+#define HBURST_INCR8 0x03
+
+#define HPROT_DATA_CACHE BIT(0)
+#define HPROT_PRIVILIGED BIT(1)
+#define HPROT_BUFFERABLE BIT(2)
+#define HPROT_CACHABLE BIT(3)
+
+#define DEFAULT_RX_COALESCE_NSECS 0
+#define DEFAULT_GMAC_RXQ_ORDER 9
+#define DEFAULT_GMAC_TXQ_ORDER 8
+#define DEFAULT_RX_BUF_ORDER 11
+#define DEFAULT_NAPI_WEIGHT 64
+#define TX_MAX_FRAGS 16
+#define TX_QUEUE_NUM 1 /* max: 6 */
+#define RX_MAX_ALLOC_ORDER 2
+
+#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
+ GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
+#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
+ GMAC0_SWTQ00_FIN_INT_BIT)
+#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
+
+#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+/**
+ * struct gmac_queue_page - page buffer per-page info
+ */
+struct gmac_queue_page {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct gmac_txq {
+ struct gmac_txdesc *ring;
+ struct sk_buff **skb;
+ unsigned int cptr;
+ unsigned int noirq_packets;
+};
+
+struct gemini_ethernet;
+
+struct gemini_ethernet_port {
+ u8 id; /* 0 or 1 */
+
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct device *dev;
+ void __iomem *dma_base;
+ void __iomem *gmac_base;
+ struct clk *pclk;
+ struct reset_control *reset;
+ int irq;
+ __le32 mac_addr[3];
+
+ void __iomem *rxq_rwptr;
+ struct gmac_rxdesc *rxq_ring;
+ unsigned int rxq_order;
+
+ struct napi_struct napi;
+ struct hrtimer rx_coalesce_timer;
+ unsigned int rx_coalesce_nsecs;
+ unsigned int freeq_refill;
+ struct gmac_txq txq[TX_QUEUE_NUM];
+ unsigned int txq_order;
+ unsigned int irq_every_tx_packets;
+
+ dma_addr_t rxq_dma_base;
+ dma_addr_t txq_dma_base;
+
+ unsigned int msg_enable;
+ spinlock_t config_lock; /* Locks config register */
+
+ struct u64_stats_sync tx_stats_syncp;
+ struct u64_stats_sync rx_stats_syncp;
+ struct u64_stats_sync ir_stats_syncp;
+
+ struct rtnl_link_stats64 stats;
+ u64 hw_stats[RX_STATS_NUM];
+ u64 rx_stats[RX_STATUS_NUM];
+ u64 rx_csum_stats[RX_CHKSUM_NUM];
+ u64 rx_napi_exits;
+ u64 tx_frag_stats[TX_MAX_FRAGS];
+ u64 tx_frags_linearized;
+ u64 tx_hw_csummed;
+};
+
+struct gemini_ethernet {
+ struct device *dev;
+ void __iomem *base;
+ struct gemini_ethernet_port *port0;
+ struct gemini_ethernet_port *port1;
+
+ spinlock_t irq_lock; /* Locks IRQ-related registers */
+ unsigned int freeq_order;
+ unsigned int freeq_frag_order;
+ struct gmac_rxdesc *freeq_ring;
+ dma_addr_t freeq_dma_base;
+ struct gmac_queue_page *freeq_pages;
+ unsigned int num_freeq_pages;
+ spinlock_t freeq_lock; /* Locks queue from reentrance */
+};
+
+#define GMAC_STATS_NUM ( \
+ RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
+ TX_MAX_FRAGS + 2)
+
+static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
+ "GMAC_IN_DISCARDS",
+ "GMAC_IN_ERRORS",
+ "GMAC_IN_MCAST",
+ "GMAC_IN_BCAST",
+ "GMAC_IN_MAC1",
+ "GMAC_IN_MAC2",
+ "RX_STATUS_GOOD_FRAME",
+ "RX_STATUS_TOO_LONG_GOOD_CRC",
+ "RX_STATUS_RUNT_FRAME",
+ "RX_STATUS_SFD_NOT_FOUND",
+ "RX_STATUS_CRC_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_CRC",
+ "RX_STATUS_ALIGNMENT_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_ALIGN",
+ "RX_STATUS_RX_ERR",
+ "RX_STATUS_DA_FILTERED",
+ "RX_STATUS_BUFFER_FULL",
+ "RX_STATUS_11",
+ "RX_STATUS_12",
+ "RX_STATUS_13",
+ "RX_STATUS_14",
+ "RX_STATUS_15",
+ "RX_CHKSUM_IP_UDP_TCP_OK",
+ "RX_CHKSUM_IP_OK_ONLY",
+ "RX_CHKSUM_NONE",
+ "RX_CHKSUM_3",
+ "RX_CHKSUM_IP_ERR_UNKNOWN",
+ "RX_CHKSUM_IP_ERR",
+ "RX_CHKSUM_TCP_UDP_ERR",
+ "RX_CHKSUM_7",
+ "RX_NAPI_EXITS",
+ "TX_FRAGS[1]",
+ "TX_FRAGS[2]",
+ "TX_FRAGS[3]",
+ "TX_FRAGS[4]",
+ "TX_FRAGS[5]",
+ "TX_FRAGS[6]",
+ "TX_FRAGS[7]",
+ "TX_FRAGS[8]",
+ "TX_FRAGS[9]",
+ "TX_FRAGS[10]",
+ "TX_FRAGS[11]",
+ "TX_FRAGS[12]",
+ "TX_FRAGS[13]",
+ "TX_FRAGS[14]",
+ "TX_FRAGS[15]",
+ "TX_FRAGS[16+]",
+ "TX_FRAGS_LINEARIZED",
+ "TX_HW_CSUMMED",
+};
+
+static void gmac_dump_dma_state(struct net_device *netdev);
+
+static void gmac_update_config0_reg(struct net_device *netdev,
+ u32 val, u32 vmask)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = (reg & ~vmask) | val;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_enable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg &= ~CONFIG0_TX_RX_DISABLE;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_disable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val |= CONFIG0_TX_RX_DISABLE;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+
+ mdelay(10); /* let GMAC consume packet */
+}
+
+static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val &= ~CONFIG0_FLOW_CTL;
+ if (tx)
+ val |= CONFIG0_FLOW_TX;
+ if (rx)
+ val |= CONFIG0_FLOW_RX;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_speed_set(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ union gmac_status status, old_status;
+ int pause_tx = 0;
+ int pause_rx = 0;
+
+ status.bits32 = readl(port->gmac_base + GMAC_STATUS);
+ old_status.bits32 = status.bits32;
+ status.bits.link = phydev->link;
+ status.bits.duplex = phydev->duplex;
+
+ switch (phydev->speed) {
+ case 1000:
+ status.bits.speed = GMAC_SPEED_1000;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
+ netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+ break;
+ case 100:
+ status.bits.speed = GMAC_SPEED_100;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+ break;
+ case 10:
+ status.bits.speed = GMAC_SPEED_10;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+ break;
+ default:
+ netdev_warn(netdev, "Not supported PHY speed (%d)\n",
+ phydev->speed);
+ }
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ u16 lcladv = phy_read(phydev, MII_ADVERTISE);
+ u16 rmtadv = phy_read(phydev, MII_LPA);
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ pause_rx = 1;
+ if (cap & FLOW_CTRL_TX)
+ pause_tx = 1;
+ }
+
+ gmac_set_flow_control(netdev, pause_tx, pause_rx);
+
+ if (old_status.bits32 == status.bits32)
+ return;
+
+ if (netif_msg_link(port)) {
+ phy_print_status(phydev);
+ netdev_info(netdev, "link flow control: %s\n",
+ phydev->pause
+ ? (phydev->asym_pause ? "tx" : "both")
+ : (phydev->asym_pause ? "rx" : "none")
+ );
+ }
+
+ gmac_disable_tx_rx(netdev);
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ gmac_enable_tx_rx(netdev);
+}
+
+static int gmac_setup_phy(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_status status = { .bits32 = 0 };
+ struct device *dev = port->dev;
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(netdev,
+ dev->of_node,
+ gmac_speed_set);
+ if (!phy)
+ return -ENODEV;
+ netdev->phydev = phy;
+
+ netdev_info(netdev, "connected to PHY \"%s\"\n",
+ phydev_name(phy));
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ phy->supported &= PHY_GBIT_FEATURES;
+ phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
+ phy->advertising = phy->supported;
+
+ /* set PHY interface type */
+ switch (phy->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_MII;
+ netdev_info(netdev, "connect to MII\n");
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_GMII;
+ netdev_info(netdev, "connect to GMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII\n");
+ break;
+ default:
+ netdev_err(netdev, "Unsupported MII interface\n");
+ phy_disconnect(phy);
+ netdev->phydev = NULL;
+ return -EINVAL;
+ }
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+
+ return 0;
+}
+
+static int gmac_pick_rx_max_len(int max_l3_len)
+{
+ /* index = CONFIG_MAXLEN_XXX values */
+ static const int max_len[8] = {
+ 1536, 1518, 1522, 1542,
+ 9212, 10236, 1518, 1518
+ };
+ int i, n = 5;
+
+ max_l3_len += ETH_HLEN + VLAN_HLEN;
+
+ if (max_l3_len > max_len[n])
+ return -1;
+
+ for (i = 0; i < 5; i++) {
+ if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
+ n = i;
+ }
+
+ return n;
+}
+
+static int gmac_init(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0 = { .bits = {
+ .dis_tx = 1,
+ .dis_rx = 1,
+ .ipv4_rx_chksum = 1,
+ .ipv6_rx_chksum = 1,
+ .rx_err_detect = 1,
+ .rgmm_edge = 1,
+ .port0_chk_hwq = 1,
+ .port1_chk_hwq = 1,
+ .port0_chk_toeq = 1,
+ .port1_chk_toeq = 1,
+ .port0_chk_classq = 1,
+ .port1_chk_classq = 1,
+ } };
+ union gmac_ahb_weight ahb_weight = { .bits = {
+ .rx_weight = 1,
+ .tx_weight = 1,
+ .hash_weight = 1,
+ .pre_req = 0x1f,
+ .tq_dv_threshold = 0,
+ } };
+ union gmac_tx_wcr0 hw_weigh = { .bits = {
+ .hw_tq3 = 1,
+ .hw_tq2 = 1,
+ .hw_tq1 = 1,
+ .hw_tq0 = 1,
+ } };
+ union gmac_tx_wcr1 sw_weigh = { .bits = {
+ .sw_tq5 = 1,
+ .sw_tq4 = 1,
+ .sw_tq3 = 1,
+ .sw_tq2 = 1,
+ .sw_tq1 = 1,
+ .sw_tq0 = 1,
+ } };
+ union gmac_config1 config1 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 24,
+ } };
+ union gmac_config2 config2 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 32,
+ } };
+ union gmac_config3 config3 = { .bits = {
+ .set_threshold = 0,
+ .rel_threshold = 0,
+ } };
+ union gmac_config0 tmp;
+ u32 val;
+
+ config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
+ tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+ config0.bits.reserved = tmp.bits.reserved;
+ writel(config0.bits32, port->gmac_base + GMAC_CONFIG0);
+ writel(config1.bits32, port->gmac_base + GMAC_CONFIG1);
+ writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
+ writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
+
+ val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
+ writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
+
+ writel(hw_weigh.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG);
+ writel(sw_weigh.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG);
+
+ port->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
+ port->txq_order = DEFAULT_GMAC_TXQ_ORDER;
+ port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;
+
+ /* Mark every quarter of the queue a packet for interrupt
+ * in order to be able to wake up the queue if it was stopped
+ */
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+
+ return 0;
+}
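
Restating the interrupt-pacing arithmetic at the end of gmac_init() with the defaults defined earlier in this file:

#include <stdio.h>

int main(void)
{
        unsigned int txq_order = 8;                     /* DEFAULT_GMAC_TXQ_ORDER */
        unsigned int entries = 1U << txq_order;         /* 256 descriptors */
        unsigned int irq_every = 1U << (txq_order - 2); /* every quarter */

        printf("%u-entry TX ring, completion IRQ every %u packets\n",
               entries, irq_every);
        /* prints: 256-entry TX ring, completion IRQ every 64 packets */
        return 0;
}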
+
+static void gmac_uninit(struct net_device *netdev)
+{
+ if (netdev->phydev)
+ phy_disconnect(netdev->phydev);
+}
+
+static int gmac_setup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ size_t entries = 1 << port->txq_order;
+ struct gmac_txq *txq = port->txq;
+ struct gmac_txdesc *desc_ring;
+ size_t len = n_txq * entries;
+ struct sk_buff **skb_tab;
+ void __iomem *rwptr_reg;
+ unsigned int r;
+ int i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL);
+ if (!skb_tab)
+ return -ENOMEM;
+
+ desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring),
+ &port->txq_dma_base, GFP_KERNEL);
+
+ if (!desc_ring) {
+ kfree(skb_tab);
+ return -ENOMEM;
+ }
+
+ if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "TX queue base it not aligned\n");
+ return -ENOMEM;
+ }
+
+ writel(port->txq_dma_base | port->txq_order,
+ port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ for (i = 0; i < n_txq; i++) {
+ txq->ring = desc_ring;
+ txq->skb = skb_tab;
+ txq->noirq_packets = 0;
+
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+ txq->cptr = r;
+
+ txq++;
+ desc_ring += entries;
+ skb_tab += entries;
+ }
+
+ return 0;
+}
+
+static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
+ unsigned int r)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int m = (1 << port->txq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int c = txq->cptr;
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ unsigned int hwchksum = 0;
+ unsigned long bytes = 0;
+ struct gmac_txdesc *txd;
+ unsigned short nfrags;
+ unsigned int errs = 0;
+ unsigned int pkts = 0;
+ unsigned int word3;
+ dma_addr_t mapping;
+
+ if (c == r)
+ return;
+
+ while (c != r) {
+ txd = txq->ring + c;
+ word0 = txd->word0;
+ word1 = txd->word1;
+ mapping = txd->word2.buf_adr;
+ word3 = txd->word3.bits32;
+
+ dma_unmap_single(geth->dev, mapping,
+ word0.bits.buffer_size, DMA_TO_DEVICE);
+
+ if (word3 & EOF_BIT)
+ dev_kfree_skb(txq->skb[c]);
+
+ c++;
+ c &= m;
+
+ if (!(word3 & SOF_BIT))
+ continue;
+
+ if (!word0.bits.status_tx_ok) {
+ errs++;
+ continue;
+ }
+
+ pkts++;
+ bytes += txd->word1.bits.byte_count;
+
+ if (word1.bits32 & TSS_CHECKUM_ENABLE)
+ hwchksum++;
+
+ nfrags = word0.bits.desc_count - 1;
+ if (nfrags) {
+ if (nfrags >= TX_MAX_FRAGS)
+ nfrags = TX_MAX_FRAGS - 1;
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->tx_frag_stats[nfrags]++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ }
+ }
+
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ port->stats.tx_errors += errs;
+ port->stats.tx_packets += pkts;
+ port->stats.tx_bytes += bytes;
+ port->tx_hw_csummed += hwchksum;
+ u64_stats_update_end(&port->ir_stats_syncp);
+
+ txq->cptr = c;
+}
+
+static void gmac_cleanup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *rwptr_reg;
+ unsigned int r, i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ for (i = 0; i < n_txq; i++) {
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+
+ gmac_clean_txq(netdev, port->txq + i, r);
+ }
+ writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ kfree(port->txq->skb);
+ dma_free_coherent(geth->dev,
+ n_txq * sizeof(*port->txq->ring) << port->txq_order,
+ port->txq->ring, port->txq_dma_base);
+}
+
+static int gmac_setup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct nontoe_qhdr __iomem *qhdr;
+
+ qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ port->rxq_rwptr = &qhdr->word1;
+
+ /* Remap a slew of memory to use for the RX queue */
+ port->rxq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*port->rxq_ring) << port->rxq_order,
+ &port->rxq_dma_base, GFP_KERNEL);
+ if (!port->rxq_ring)
+ return -ENOMEM;
+ if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) {
+ dev_warn(geth->dev, "RX queue base it not aligned\n");
+ return -ENOMEM;
+ }
+
+ writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0);
+ writel(0, port->rxq_rwptr);
+ return 0;
+}
+
+static struct gmac_queue_page *
+gmac_get_queue_page(struct gemini_ethernet *geth,
+ struct gemini_ethernet_port *port,
+ dma_addr_t addr)
+{
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+ int i;
+
+ /* Only look for even pages */
+ mapping = addr & PAGE_MASK;
+
+ if (!geth->freeq_pages) {
+ dev_err(geth->dev, "try to get page with no page list\n");
+ return NULL;
+ }
+
+ /* Look up a ring buffer page from virtual mapping */
+ for (i = 0; i < geth->num_freeq_pages; i++) {
+ gpage = &geth->freeq_pages[i];
+ if (gpage->mapping == mapping)
+ return gpage;
+ }
+
+ return NULL;
+}
+
+static void gmac_cleanup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct gmac_rxdesc *rxd = port->rxq_ring;
+ static struct gmac_queue_page *gpage;
+ struct nontoe_qhdr __iomem *qhdr;
+ void __iomem *dma_reg;
+ void __iomem *ptr_reg;
+ dma_addr_t mapping;
+ union dma_rwptr rw;
+ unsigned int r, w;
+
+ qhdr = geth->base +
+ TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ dma_reg = &qhdr->word0;
+ ptr_reg = &qhdr->word1;
+
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+ writew(r, ptr_reg + 2);
+
+ writel(0, dma_reg);
+
+ /* Loop from read pointer to write pointer of the RX queue
+ * and free up all pages by the queue.
+ */
+ while (r != w) {
+ mapping = rxd[r].word2.buf_adr;
+ r++;
+ r &= ((1 << port->rxq_order) - 1);
+
+ if (!mapping)
+ continue;
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find page\n");
+ continue;
+ }
+ /* Release the RX queue reference to the page */
+ put_page(gpage->page);
+ }
+
+ dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order,
+ port->rxq_ring, port->rxq_dma_base);
+}
+
+static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth,
+ int pn)
+{
+ struct gmac_rxdesc *freeq_entry;
+ struct gmac_queue_page *gpage;
+ unsigned int fpp_order;
+ unsigned int frag_len;
+ dma_addr_t mapping;
+ struct page *page;
+ int i;
+
+ /* First allocate and DMA map a single page */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return NULL;
+
+ mapping = dma_map_single(geth->dev, page_address(page),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping)) {
+ put_page(page);
+ return NULL;
+ }
+
+ /* Then assign the page mapping (physical address) to the buffer address
+ * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
+ * 4k), and the default RX frag order is 11 (fragments are up to 2048
+ * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
+ * each page normally needs two entries in the queue.
+ */
+ frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */
+ fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ freeq_entry = geth->freeq_ring + (pn << fpp_order);
+ dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
+ pn, frag_len, (1 << fpp_order), freeq_entry);
+ for (i = (1 << fpp_order); i > 0; i--) {
+ freeq_entry->word2.buf_adr = mapping;
+ freeq_entry++;
+ mapping += frag_len;
+ }
+
+ /* If the freeq entry already has a page mapped, then unmap it. */
+ gpage = &geth->freeq_pages[pn];
+ if (gpage->page) {
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ /* This should be the last reference to the page so it gets
+ * released
+ */
+ put_page(gpage->page);
+ }
+
+ /* Then put our new mapping into the page table */
+ dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n",
+ pn, (unsigned int)mapping, page);
+ gpage->mapping = mapping;
+ gpage->page = page;
+
+ return page;
+}
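
The fragments-per-page math from the comment in geth_freeq_alloc_map_page(), restated as standalone arithmetic; PAGE_SHIFT of 12 is assumed, as on ARM:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4096-byte pages */

int main(void)
{
        unsigned int freeq_frag_order = 11;     /* 2048-byte RX fragments */
        unsigned int fpp_order = PAGE_SHIFT - freeq_frag_order;

        printf("%u fragments of %u bytes per page\n",
               1U << fpp_order, 1U << freeq_frag_order);
        /* prints: 2 fragments of 2048 bytes per page */
        return 0;
}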
+
+/**
+ * geth_fill_freeq() - Fill the freeq with empty fragments to use
+ * @geth: the ethernet adapter
+ * @refill: if false, reset the queue by filling in all freeq entries from
+ * the read pointer; if true, just top it up from the current write
+ * pointer. The refill interrupt usually fires when the queue is half empty.
+ */
+static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int count = 0;
+ unsigned int pn, epn;
+ unsigned long flags;
+ union dma_rwptr rw;
+ unsigned int m_pn;
+
+ /* Mask for page */
+ m_pn = (1 << (geth->freeq_order - fpp_order)) - 1;
+
+ spin_lock_irqsave(&geth->freeq_lock, flags);
+
+ rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order;
+ epn = (rw.bits.rptr >> fpp_order) - 1;
+ epn &= m_pn;
+
+ /* Loop over the freeq ring buffer entries */
+ while (pn != epn) {
+ struct gmac_queue_page *gpage;
+ struct page *page;
+
+ gpage = &geth->freeq_pages[pn];
+ page = gpage->page;
+
+ dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n",
+ pn, page_ref_count(page), 1 << fpp_order);
+
+ if (page_ref_count(page) > 1) {
+ unsigned int fl = (pn - epn) & m_pn;
+
+ if (fl > 64 >> fpp_order)
+ break;
+
+ page = geth_freeq_alloc_map_page(geth, pn);
+ if (!page)
+ break;
+ }
+
+ /* Add one reference per fragment in the page */
+ page_ref_add(page, 1 << fpp_order);
+ count += 1 << fpp_order;
+ pn++;
+ pn &= m_pn;
+ }
+
+ writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+
+ spin_unlock_irqrestore(&geth->freeq_lock, flags);
+
+ return count;
+}
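
The freeq hinges on page reference counting: geth_fill_freeq() adds one reference per published fragment, the RX path eventually drops one reference per consumed fragment, and a page can only be republished once page_ref_count() has fallen back to the freeq's own single reference. A toy model of that bookkeeping; page_ref is a plain int standing in for page_ref_count():

#include <stdio.h>

int main(void)
{
        int fpp = 2;            /* fragments per page (fpp_order = 1) */
        int page_ref = 1;       /* reference held by the page table */

        page_ref += fpp;        /* publish: one reference per fragment */
        page_ref--;             /* RX consumes fragment 0 */
        page_ref--;             /* RX consumes fragment 1 */
        printf("references left: %d (%s)\n", page_ref,
               page_ref > 1 ? "fragments still in flight" : "recyclable");
        /* prints: references left: 1 (recyclable) */
        return 0;
}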
+
+static int geth_setup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ union queue_threshold qt;
+ union dma_skb_size skbsz;
+ unsigned int filled;
+ unsigned int pn;
+
+ geth->freeq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ &geth->freeq_dma_base, GFP_KERNEL);
+ if (!geth->freeq_ring)
+ return -ENOMEM;
+ if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "queue ring base it not aligned\n");
+ goto err_freeq;
+ }
+
+ /* Allocate a mapping to page look-up index */
+ geth->freeq_pages = kzalloc(pages * sizeof(*geth->freeq_pages),
+ GFP_KERNEL);
+ if (!geth->freeq_pages)
+ goto err_freeq;
+ geth->num_freeq_pages = pages;
+
+ dev_info(geth->dev, "allocate %d pages for queue\n", pages);
+ for (pn = 0; pn < pages; pn++)
+ if (!geth_freeq_alloc_map_page(geth, pn))
+ goto err_freeq_alloc;
+
+ filled = geth_fill_freeq(geth, false);
+ if (!filled)
+ goto err_freeq_alloc;
+
+ qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+ qt.bits.swfq_empty = 32;
+ writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+
+ skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order;
+ writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG);
+ writel(geth->freeq_dma_base | geth->freeq_order,
+ geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ return 0;
+
+err_freeq_alloc:
+ while (pn > 0) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ --pn;
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ gpage = &geth->freeq_pages[pn];
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+err_freeq:
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+ geth->freeq_ring = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
+ * @geth: the Gemini global ethernet state
+ */
+static void geth_cleanup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ unsigned int pn;
+
+ writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG),
+ geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ for (pn = 0; pn < pages; pn++) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+
+ gpage = &geth->freeq_pages[pn];
+ while (page_ref_count(gpage->page) > 0)
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+}
+
+/**
+ * geth_resize_freeq() - resize the software queue depth
+ * @port: the port requesting the change
+ *
+ * This gets called at least once during probe() so the device queue gets
+ * "resized" from the hardware defaults. Since both ports/net devices share
+ * the same hardware queue, some synchronization between the ports is
+ * needed.
+ */
+static int geth_resize_freeq(struct gemini_ethernet_port *port)
+{
+ struct gemini_ethernet *geth = port->geth;
+ struct net_device *netdev = port->netdev;
+ struct gemini_ethernet_port *other_port;
+ struct net_device *other_netdev;
+ unsigned int new_size = 0;
+ unsigned int new_order;
+ unsigned long flags;
+ u32 en;
+ int ret;
+
+ if (netdev->dev_id == 0)
+ other_netdev = geth->port1->netdev;
+ else
+ other_netdev = geth->port0->netdev;
+
+ if (other_netdev && netif_running(other_netdev))
+ return -EBUSY;
+
+ new_size = 1 << (port->rxq_order + 1);
+ netdev_dbg(netdev, "port %d size: %d order %d\n",
+ netdev->dev_id,
+ new_size,
+ port->rxq_order);
+ if (other_netdev) {
+ other_port = netdev_priv(other_netdev);
+ new_size += 1 << (other_port->rxq_order + 1);
+ netdev_dbg(other_netdev, "port %d size: %d order %d\n",
+ other_netdev->dev_id,
+ (1 << (other_port->rxq_order + 1)),
+ other_port->rxq_order);
+ }
+
+ new_order = min(15, ilog2(new_size - 1) + 1);
+ dev_dbg(geth->dev, "set shared queue to size %d order %d\n",
+ new_size, new_order);
+ if (geth->freeq_order == new_order)
+ return 0;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ /* Disable the software queue IRQs */
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ en &= ~SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ /* Drop the old queue */
+ if (geth->freeq_ring)
+ geth_cleanup_freeq(geth);
+
+ /* Allocate a new queue with the desired order */
+ geth->freeq_order = new_order;
+ ret = geth_setup_freeq(geth);
+
+ /* Restart the interrupts - NOTE if this is the first resize
+ * after probe(), this is where the interrupts get turned on
+ * in the first place.
+ */
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ en |= SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return ret;
+}
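
The sizing step in geth_resize_freeq() adds up both ports' demands and rounds the total up to the next power of two, capped at order 15. Worked through with the default rxq_order of 9 on both ports; ilog2_u32() again stands in for the kernel's ilog2():

#include <stdio.h>

static int ilog2_u32(unsigned int v)
{
        int r = -1;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int new_size = (1 << 10) + (1 << 10);  /* 1 << (9 + 1) per port */
        int new_order = ilog2_u32(new_size - 1) + 1;

        if (new_order > 15)
                new_order = 15;
        printf("combined %u entries -> freeq order %d\n", new_size, new_order);
        /* prints: combined 2048 entries -> freeq order 11 */
        return 0;
}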
+
+static void gmac_tx_irq_enable(struct net_device *netdev,
+ unsigned int txq, int en)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+
+ mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+
+ if (en)
+ writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = en ? val | mask : val & ~mask;
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+}
+
+static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+{
+ struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);
+
+ gmac_tx_irq_enable(netdev, txq_num, 0);
+ netif_tx_wake_queue(ntxq);
+}
+
+static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ struct gmac_txq *txq, unsigned short *desc)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct skb_shared_info *skb_si = skb_shinfo(skb);
+ unsigned short m = (1 << port->txq_order) - 1;
+ short frag, last_frag = skb_si->nr_frags - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int word1, word3, buflen;
+ unsigned short w = *desc;
+ struct gmac_txdesc *txd;
+ skb_frag_t *skb_frag;
+ dma_addr_t mapping;
+ unsigned short mtu;
+ void *buffer;
+
+ mtu = ETH_HLEN;
+ mtu += netdev->mtu;
+ if (skb->protocol == htons(ETH_P_8021Q))
+ mtu += VLAN_HLEN;
+
+ word1 = skb->len;
+ word3 = SOF_BIT;
+
+ if (word1 > mtu) {
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mtu;
+ }
+
+ if (skb->ip_summed != CHECKSUM_NONE) {
+ int tcp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ word1 |= TSS_IP_CHKSUM_BIT;
+ tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+ } else { /* IPv6 */
+ word1 |= TSS_IPV6_ENABLE_BIT;
+ tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
+ }
+
+ word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
+ }
+
+ frag = -1;
+ while (frag <= last_frag) {
+ if (frag == -1) {
+ buffer = skb->data;
+ buflen = skb_headlen(skb);
+ } else {
+ skb_frag = skb_si->frags + frag;
+ buffer = page_address(skb_frag_page(skb_frag)) +
+ skb_frag->page_offset;
+ buflen = skb_frag->size;
+ }
+
+ if (frag == last_frag) {
+ word3 |= EOF_BIT;
+ txq->skb[w] = skb;
+ }
+
+ mapping = dma_map_single(geth->dev, buffer, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping))
+ goto map_error;
+
+ txd = txq->ring + w;
+ txd->word0.bits32 = buflen;
+ txd->word1.bits32 = word1;
+ txd->word2.buf_adr = mapping;
+ txd->word3.bits32 = word3;
+
+ word3 &= MTU_SIZE_BIT_MASK;
+ w++;
+ w &= m;
+ frag++;
+ }
+
+ *desc = w;
+ return 0;
+
+map_error:
+ while (w != *desc) {
+ w--;
+ w &= m;
+
+ dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr,
+ txq->ring[w].word0.bits.buffer_size,
+ DMA_TO_DEVICE);
+ }
+ return -ENOMEM;
+}
+
+static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->txq_order) - 1;
+ struct netdev_queue *ntxq;
+ unsigned short r, w, d;
+ void __iomem *ptr_reg;
+ struct gmac_txq *txq;
+ int txq_num, nfrags;
+ union dma_rwptr rw;
+
+ SKB_FRAG_ASSERT(skb);
+
+ if (skb->len >= 0x10000)
+ goto out_drop_free;
+
+ txq_num = skb_get_queue_mapping(skb);
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
+ txq = &port->txq[txq_num];
+ ntxq = netdev_get_tx_queue(netdev, txq_num);
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ gmac_clean_txq(netdev, txq, r);
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ netif_tx_stop_queue(ntxq);
+
+ d = txq->cptr + nfrags + 16;
+ d &= m;
+ txq->ring[d].word3.bits.eofie = 1;
+ gmac_tx_irq_enable(netdev, txq_num, 1);
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ netdev->stats.tx_fifo_errors++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w)) {
+ if (skb_linearize(skb))
+ goto out_drop;
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->tx_frags_linearized++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w))
+ goto out_drop_free;
+ }
+
+ writew(w, ptr_reg + 2);
+
+ gmac_clean_txq(netdev, txq, r);
+ return NETDEV_TX_OK;
+
+out_drop_free:
+ dev_kfree_skb(skb);
+out_drop:
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->stats.tx_dropped++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_OK;
+}
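
The space check in gmac_start_xmit() is standard power-of-two ring arithmetic: with cleanup pointer c and write pointer w, (c - w - 1) & mask yields the number of descriptors that can be written before running into unreclaimed ones, and the masking makes the subtraction wrap correctly. A standalone check of that identity:

#include <stdio.h>

int main(void)
{
        unsigned short order = 8;
        unsigned short m = (1 << order) - 1;    /* 256-entry ring */
        unsigned short c = 10, w = 250;         /* writer has wrapped */
        unsigned short d = (c - w - 1) & m;

        printf("free descriptors: %u\n", d);
        /* prints: free descriptors: 15 */
        return 0;
}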
+
+static void gmac_tx_timeout(struct net_device *netdev)
+{
+ netdev_err(netdev, "Tx timeout\n");
+ gmac_dump_dma_state(netdev);
+}
+
+static void gmac_enable_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_info(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static void gmac_enable_rx_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id,
+ enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port,
+ union gmac_rxdesc_0 word0,
+ unsigned int frame_len)
+{
+ unsigned int rx_csum = word0.bits.chksum_status;
+ unsigned int rx_status = word0.bits.status;
+ struct sk_buff *skb = NULL;
+
+ port->rx_stats[rx_status]++;
+ port->rx_csum_stats[rx_csum]++;
+
+ if (word0.bits.derr || word0.bits.perr ||
+ rx_status || frame_len < ETH_ZLEN ||
+ rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
+ port->stats.rx_errors++;
+
+ if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
+ port->stats.rx_length_errors++;
+ if (RX_ERROR_OVER(rx_status))
+ port->stats.rx_over_errors++;
+ if (RX_ERROR_CRC(rx_status))
+ port->stats.rx_crc_errors++;
+ if (RX_ERROR_FRAME(rx_status))
+ port->stats.rx_frame_errors++;
+ return NULL;
+ }
+
+ skb = napi_get_frags(&port->napi);
+ if (!skb)
+ goto update_exit;
+
+ if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+update_exit:
+ port->stats.rx_bytes += frame_len;
+ port->stats.rx_packets++;
+ return skb;
+}
+
+static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->rxq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg = port->rxq_rwptr;
+ unsigned int frame_len, frag_len;
+ struct gmac_rxdesc *rx = NULL;
+ struct gmac_queue_page *gpage;
+ static struct sk_buff *skb;
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_3 word3;
+ struct page *page = NULL;
+ unsigned int page_offs;
+ unsigned short r, w;
+ union dma_rwptr rw;
+ dma_addr_t mapping;
+ int frag_nr = 0;
+
+ rw.bits32 = readl(ptr_reg);
+ /* Reset interrupt as all packets until here are taken into account */
+ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+ while (budget && w != r) {
+ rx = port->rxq_ring + r;
+ word0 = rx->word0;
+ word1 = rx->word1;
+ mapping = rx->word2.buf_adr;
+ word3 = rx->word3;
+
+ r++;
+ r &= m;
+
+ frag_len = word0.bits.buffer_size;
+ frame_len = word1.bits.byte_count;
+ page_offs = mapping & ~PAGE_MASK;
+
+ if (!mapping) {
+ netdev_err(netdev,
+ "rxq[%u]: HW BUG: zero DMA desc\n", r);
+ goto err_drop;
+ }
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find mapping\n");
+ continue;
+ }
+ page = gpage->page;
+
+ if (word3.bits32 & SOF_BIT) {
+ if (skb) {
+ napi_free_frags(&port->napi);
+ port->stats.rx_dropped++;
+ }
+
+ skb = gmac_skb_if_good_frame(port, word0, frame_len);
+ if (!skb)
+ goto err_drop;
+
+ page_offs += NET_IP_ALIGN;
+ frag_len -= NET_IP_ALIGN;
+ frag_nr = 0;
+
+ } else if (!skb) {
+ put_page(page);
+ continue;
+ }
+
+ if (word3.bits32 & EOF_BIT)
+ frag_len = frame_len - skb->len;
+
+ /* append page frag to skb */
+ if (frag_nr == MAX_SKB_FRAGS)
+ goto err_drop;
+
+ if (frag_len == 0)
+ netdev_err(netdev, "Received fragment with len = 0\n");
+
+ skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
+ skb->len += frag_len;
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ frag_nr++;
+
+ if (word3.bits32 & EOF_BIT) {
+ napi_gro_frags(&port->napi);
+ skb = NULL;
+ --budget;
+ }
+ continue;
+
+err_drop:
+ if (skb) {
+ napi_free_frags(&port->napi);
+ skb = NULL;
+ }
+
+ if (mapping)
+ put_page(page);
+
+ port->stats.rx_dropped++;
+ }
+
+ writew(r, ptr_reg);
+ return budget;
+}
+
+static int gmac_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(napi->dev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int freeq_threshold;
+ unsigned int received;
+
+ freeq_threshold = 1 << (geth->freeq_order - 1);
+ u64_stats_update_begin(&port->rx_stats_syncp);
+
+ received = gmac_rx(napi->dev, budget);
+ if (received < budget) {
+ napi_gro_flush(napi, false);
+ napi_complete_done(napi, received);
+ gmac_enable_rx_irq(napi->dev, 1);
+ ++port->rx_napi_exits;
+ }
+
+ port->freeq_refill += (budget - received);
+ if (port->freeq_refill > freeq_threshold) {
+ port->freeq_refill -= freeq_threshold;
+ geth_fill_freeq(geth, true);
+ }
+
+ u64_stats_update_end(&port->rx_stats_syncp);
+ return received;
+}
+
+static void gmac_dump_dma_state(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg;
+ u32 reg[5];
+
+ /* Interrupt status */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* Interrupt enable */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* RX DMA status */
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(port->rxq_rwptr);
+ reg[3] = GET_WPTR(port->rxq_rwptr);
+ netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG);
+ netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* TX DMA status */
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG);
+ netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* FREE queues status */
+ ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG;
+
+ reg[0] = GET_RPTR(ptr_reg);
+ reg[1] = GET_WPTR(ptr_reg);
+
+ ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG;
+
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+}
+
+static void gmac_update_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int rx_discards, rx_mcast, rx_bcast;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+
+ rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS);
+ port->hw_stats[0] += rx_discards;
+ port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS);
+ rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST);
+ port->hw_stats[2] += rx_mcast;
+ rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST);
+ port->hw_stats[3] += rx_bcast;
+ port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1);
+ port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2);
+
+ port->stats.rx_missed_errors += rx_discards;
+ port->stats.multicast += rx_mcast;
+ port->stats.multicast += rx_bcast;
+
+ writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+/**
+ * gmac_get_intr_flags() - get interrupt status flags for a port
+ * @netdev: the net device for the port to get flags from
+ * @i: the interrupt status register 0..4
+ */
+static u32 gmac_get_intr_flags(struct net_device *netdev, int i)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *irqif_reg, *irqen_reg;
+ unsigned int offs, val;
+
+ /* Calculate the offset using the stride of the status registers */
+ offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG -
+ GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
+ irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;
+
+ val = readl(irqif_reg) & readl(irqen_reg);
+ return val;
+}
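+
+/* Illustrative note (not part of the original patch): the status
+ * registers sit 0x10 apart (0x0020, 0x0030, ...), so for i == 4 the
+ * computed offset is 0x40 and the read lands on
+ * GLOBAL_INTERRUPT_STATUS_4_REG at base + 0x60.
+ */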
+
+static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
+{
+ struct gemini_ethernet_port *port =
+ container_of(timer, struct gemini_ethernet_port,
+ rx_coalesce_timer);
+
+ napi_schedule(&port->napi);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gmac_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port;
+ struct net_device *netdev = data;
+ struct gemini_ethernet *geth;
+ u32 val, orr = 0;
+
+ port = netdev_priv(netdev);
+ geth = port->geth;
+
+ val = gmac_get_intr_flags(netdev, 0);
+ orr |= val;
+
+ if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) {
+ /* Oh, crap */
+ netdev_err(netdev, "hw failure/sw bug\n");
+ gmac_dump_dma_state(netdev);
+
+ /* don't know how to recover, just reduce losses */
+ gmac_enable_irq(netdev, 0);
+ return IRQ_HANDLED;
+ }
+
+ if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6)))
+ gmac_tx_irq(netdev, 0);
+
+ val = gmac_get_intr_flags(netdev, 1);
+ orr |= val;
+
+ if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) {
+ gmac_enable_rx_irq(netdev, 0);
+
+ if (!port->rx_coalesce_nsecs) {
+ napi_schedule(&port->napi);
+ } else {
+ ktime_t ktime;
+
+ ktime = ktime_set(0, port->rx_coalesce_nsecs);
+ hrtimer_start(&port->rx_coalesce_timer, ktime,
+ HRTIMER_MODE_REL);
+ }
+ }
+
+ val = gmac_get_intr_flags(netdev, 4);
+ orr |= val;
+
+ if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8)))
+ gmac_update_hw_stats(netdev);
+
+ if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ spin_lock(&geth->irq_lock);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ ++port->stats.rx_fifo_errors;
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock(&geth->irq_lock);
+ }
+
+ return orr ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void gmac_start_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 1;
+ dma_ctrl.bits.td_enable = 1;
+ dma_ctrl.bits.loopback = 0;
+ dma_ctrl.bits.drop_small_ack = 0;
+ dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
+ dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
+ dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.rd_bus = HSIZE_8;
+ dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
+ dma_ctrl.bits.td_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.td_bus = HSIZE_8;
+
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static void gmac_stop_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 0;
+ dma_ctrl.bits.td_enable = 0;
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static int gmac_open(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err;
+
+ if (!netdev->phydev) {
+ err = gmac_setup_phy(netdev);
+ if (err) {
+ netif_err(port, ifup, netdev,
+ "PHY init failed: %d\n", err);
+ return err;
+ }
+ }
+
+ err = request_irq(netdev->irq, gmac_irq,
+ IRQF_SHARED, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "no IRQ\n");
+ return err;
+ }
+
+ netif_carrier_off(netdev);
+ phy_start(netdev->phydev);
+
+ err = geth_resize_freeq(port);
+ if (err) {
+ netdev_err(netdev, "could not resize freeq\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_rxq(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup RXQ\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_txqs(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup TXQs\n");
+ gmac_cleanup_rxq(netdev);
+ goto err_stop_phy;
+ }
+
+ napi_enable(&port->napi);
+
+ gmac_start_dma(port);
+ gmac_enable_irq(netdev, 1);
+ gmac_enable_tx_rx(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+
+ netdev_info(netdev, "opened\n");
+
+ return 0;
+
+err_stop_phy:
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+ return err;
+}
+
+static int gmac_stop(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ hrtimer_cancel(&port->rx_coalesce_timer);
+ netif_tx_stop_all_queues(netdev);
+ gmac_disable_tx_rx(netdev);
+ gmac_stop_dma(port);
+ napi_disable(&port->napi);
+
+ gmac_enable_irq(netdev, 0);
+ gmac_cleanup_rxq(netdev);
+ gmac_cleanup_txqs(netdev);
+
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+
+ gmac_update_hw_stats(netdev);
+ return 0;
+}
+
+static void gmac_set_rx_mode(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_rx_fltr filter = { .bits = {
+ .broadcast = 1,
+ .multicast = 1,
+ .unicast = 1,
+ } };
+ struct netdev_hw_addr *ha;
+ unsigned int bit_nr;
+ u32 mc_filter[2];
+
+ mc_filter[1] = 0;
+ mc_filter[0] = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ filter.bits.error = 1;
+ filter.bits.promiscuous = 1;
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
+ }
+ }
+
+ writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0);
+ writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1);
+ writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR);
+}
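+
+/* Worked example (illustrative, not from the original patch): the
+ * complemented CRC-32 of the multicast address is reduced to 6 bits,
+ * picking one of 64 positions across the two filter words; bit_nr 37
+ * sets bit 5 of mc_filter[1] (37 >> 5 == 1, 37 & 0x1f == 5).
+ */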
+
+static void gmac_write_mac_address(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ __le32 addr[3];
+
+ memset(addr, 0, sizeof(addr));
+ memcpy(addr, netdev->dev_addr, ETH_ALEN);
+
+ writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0);
+ writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1);
+ writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2);
+}
+
+static int gmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ gmac_write_mac_address(netdev);
+
+ return 0;
+}
+
+static void gmac_clear_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ readl(port->gmac_base + GMAC_IN_DISCARDS);
+ readl(port->gmac_base + GMAC_IN_ERRORS);
+ readl(port->gmac_base + GMAC_IN_MCAST);
+ readl(port->gmac_base + GMAC_IN_BCAST);
+ readl(port->gmac_base + GMAC_IN_MAC1);
+ readl(port->gmac_base + GMAC_IN_MAC2);
+}
+
+static void gmac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with RX NAPI */
+ do {
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ stats->rx_packets = port->stats.rx_packets;
+ stats->rx_bytes = port->stats.rx_bytes;
+ stats->rx_errors = port->stats.rx_errors;
+ stats->rx_dropped = port->stats.rx_dropped;
+
+ stats->rx_length_errors = port->stats.rx_length_errors;
+ stats->rx_over_errors = port->stats.rx_over_errors;
+ stats->rx_crc_errors = port->stats.rx_crc_errors;
+ stats->rx_frame_errors = port->stats.rx_frame_errors;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+
+ /* Racing with MIB and TX completion interrupts */
+ do {
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ stats->tx_errors = port->stats.tx_errors;
+ stats->tx_packets = port->stats.tx_packets;
+ stats->tx_bytes = port->stats.tx_bytes;
+
+ stats->multicast = port->stats.multicast;
+ stats->rx_missed_errors = port->stats.rx_missed_errors;
+ stats->rx_fifo_errors = port->stats.rx_fifo_errors;
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+
+ /* Racing with hard_start_xmit */
+ do {
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+ stats->tx_dropped = port->stats.tx_dropped;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+
+ stats->rx_dropped += stats->rx_missed_errors;
+}
+
+static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int max_len = gmac_pick_rx_max_len(new_mtu);
+
+ if (max_len < 0)
+ return -EINVAL;
+
+ gmac_disable_tx_rx(netdev);
+
+ netdev->mtu = new_mtu;
+ gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
+ CONFIG0_MAXLEN_MASK);
+
+ netdev_update_features(netdev);
+
+ gmac_enable_tx_rx(netdev);
+
+ return 0;
+}
+
+static netdev_features_t gmac_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+ features &= ~GMAC_OFFLOAD_FEATURES;
+
+ return features;
+}
+
+static int gmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int enable = features & NETIF_F_RXCSUM;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+ return 0;
+}
+
+static int gmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
+}
+
+static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
+}
+
+static void gmac_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats, u64 *values)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+ u64 *p;
+ int i;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with MIB interrupt */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ for (i = 0; i < RX_STATS_NUM; i++)
+ *p++ = port->hw_stats[i];
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ values = p;
+
+ /* Racing with RX NAPI */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ for (i = 0; i < RX_STATUS_NUM; i++)
+ *p++ = port->rx_stats[i];
+ for (i = 0; i < RX_CHKSUM_NUM; i++)
+ *p++ = port->rx_csum_stats[i];
+ *p++ = port->rx_napi_exits;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ values = p;
+
+ /* Racing with TX start_xmit */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+ for (i = 0; i < TX_MAX_FRAGS; i++) {
+ *values++ = port->tx_frag_stats[i];
+ port->tx_frag_stats[i] = 0;
+ }
+ *values++ = port->tx_frags_linearized;
+ *values++ = port->tx_hw_csummed;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+}
+
+static int gmac_get_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+
+ return 0;
+}
+
+static int gmac_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+}
+
+static int gmac_nway_reset(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_start_aneg(netdev->phydev);
+}
+
+static void gmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0;
+
+ config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+ pparam->rx_pause = config0.bits.rx_fc_en;
+ pparam->tx_pause = config0.bits.tx_fc_en;
+ pparam->autoneg = true;
+}
+
+static void gmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0;
+
+ config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+ rp->rx_max_pending = 1 << 15;
+ rp->rx_mini_max_pending = 0;
+ rp->rx_jumbo_max_pending = 0;
+ rp->tx_max_pending = 1 << 15;
+
+ rp->rx_pending = 1 << port->rxq_order;
+ rp->rx_mini_pending = 0;
+ rp->rx_jumbo_pending = 0;
+ rp->tx_pending = 1 << port->txq_order;
+}
+
+static int gmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err = 0;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (rp->rx_pending) {
+ port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
+ err = geth_resize_freeq(port);
+ }
+ if (rp->tx_pending) {
+ port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+ }
+
+ return err;
+}
+
+static int gmac_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ ecmd->rx_max_coalesced_frames = 1;
+ ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets;
+ ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000;
+
+ return 0;
+}
+
+static int gmac_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ if (ecmd->tx_max_coalesced_frames < 1)
+ return -EINVAL;
+ if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order)
+ return -EINVAL;
+
+ port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
+ port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static u32 gmac_get_msglevel(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ return port->msg_enable;
+}
+
+static void gmac_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ port->msg_enable = level;
+}
+
+static void gmac_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
+}
+
+static const struct net_device_ops gmac_351x_ops = {
+ .ndo_init = gmac_init,
+ .ndo_uninit = gmac_uninit,
+ .ndo_open = gmac_open,
+ .ndo_stop = gmac_stop,
+ .ndo_start_xmit = gmac_start_xmit,
+ .ndo_tx_timeout = gmac_tx_timeout,
+ .ndo_set_rx_mode = gmac_set_rx_mode,
+ .ndo_set_mac_address = gmac_set_mac_address,
+ .ndo_get_stats64 = gmac_get_stats64,
+ .ndo_change_mtu = gmac_change_mtu,
+ .ndo_fix_features = gmac_fix_features,
+ .ndo_set_features = gmac_set_features,
+};
+
+static const struct ethtool_ops gmac_351x_ethtool_ops = {
+ .get_sset_count = gmac_get_sset_count,
+ .get_strings = gmac_get_strings,
+ .get_ethtool_stats = gmac_get_ethtool_stats,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = gmac_get_ksettings,
+ .set_link_ksettings = gmac_set_ksettings,
+ .nway_reset = gmac_nway_reset,
+ .get_pauseparam = gmac_get_pauseparam,
+ .get_ringparam = gmac_get_ringparam,
+ .set_ringparam = gmac_set_ringparam,
+ .get_coalesce = gmac_get_coalesce,
+ .set_coalesce = gmac_set_coalesce,
+ .get_msglevel = gmac_get_msglevel,
+ .set_msglevel = gmac_set_msglevel,
+ .get_drvinfo = gmac_get_drvinfo,
+};
+
+static irqreturn_t gemini_port_irq_thread(int irq, void *data)
+{
+ unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ unsigned long flags;
+
+ geth = port->geth;
+	/* The queue is half empty, so refill it */
+ geth_fill_freeq(geth, true);
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ /* ACK queue interrupt */
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ /* Enable queue interrupt again */
+ irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gemini_port_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ irqreturn_t ret = IRQ_NONE;
+ u32 val, en;
+
+ geth = port->geth;
+ spin_lock(&geth->irq_lock);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ if (val & en & SWFQ_EMPTY_INT_BIT) {
+ /* Disable the queue empty interrupt while we work on
+ * processing the queue. Also disable overrun interrupts
+ * as there is not much we can do about it here.
+ */
+ en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
+ | GMAC1_RX_OVERRUN_INT_BIT);
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ spin_unlock(&geth->irq_lock);
+
+ return ret;
+}
+
+static void gemini_port_remove(struct gemini_ethernet_port *port)
+{
+ if (port->netdev)
+ unregister_netdev(port->netdev);
+ clk_disable_unprepare(port->pclk);
+ geth_cleanup_freeq(port->geth);
+}
+
+static void gemini_ethernet_init(struct gemini_ethernet *geth)
+{
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ /* Interrupt config:
+ *
+ * GMAC0 intr bits ------> int0 ----> eth0
+ * GMAC1 intr bits ------> int1 ----> eth1
+ * TOE intr -------------> int1 ----> eth1
+ * Classification Intr --> int0 ----> eth0
+ * Default Q0 -----------> int0 ----> eth0
+ * Default Q1 -----------> int1 ----> eth1
+ * FreeQ intr -----------> int1 ----> eth1
+ */
+ writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG);
+ writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG);
+ writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG);
+
+	/* The edge-triggered interrupts are packed into one
+	 * level-triggered interrupt: ack (clear) any latched events
+	 */
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ /* Set up queue */
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG);
+
+ geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
+	/* This forces a resize of the queue on probe() so that we
+	 * set up and enable the queue IRQ. FIXME: fragile.
+	 */
+ geth->freeq_order = 1;
+}
+
+static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
+{
+ port->mac_addr[0] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0));
+ port->mac_addr[1] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1));
+ port->mac_addr[2] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2));
+}
+
+static int gemini_ethernet_port_probe(struct platform_device *pdev)
+{
+ char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct gemini_ethernet_port *port;
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct resource *gmacres;
+ struct resource *dmares;
+ struct device *parent;
+ unsigned int id;
+ int irq;
+ int ret;
+
+ parent = dev->parent;
+ geth = dev_get_drvdata(parent);
+
+ if (!strcmp(dev_name(dev), "60008000.ethernet-port"))
+ id = 0;
+ else if (!strcmp(dev_name(dev), "6000c000.ethernet-port"))
+ id = 1;
+ else
+ return -ENODEV;
+
+ dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
+
+ netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+ if (!netdev) {
+ dev_err(dev, "Can't allocate ethernet device #%d\n", id);
+ return -ENOMEM;
+ }
+
+ port = netdev_priv(netdev);
+ SET_NETDEV_DEV(netdev, dev);
+ port->netdev = netdev;
+ port->id = id;
+ port->geth = geth;
+ port->dev = dev;
+
+ /* DMA memory */
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dmares) {
+ dev_err(dev, "no DMA resource\n");
+ return -ENODEV;
+ }
+ port->dma_base = devm_ioremap_resource(dev, dmares);
+ if (IS_ERR(port->dma_base))
+ return PTR_ERR(port->dma_base);
+
+ /* GMAC config memory */
+ gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!gmacres) {
+ dev_err(dev, "no GMAC resource\n");
+ return -ENODEV;
+ }
+ port->gmac_base = devm_ioremap_resource(dev, gmacres);
+ if (IS_ERR(port->gmac_base))
+ return PTR_ERR(port->gmac_base);
+
+ /* Interrupt */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no IRQ\n");
+ return irq ? irq : -ENODEV;
+ }
+ port->irq = irq;
+
+ /* Clock the port */
+ port->pclk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(port->pclk)) {
+ dev_err(dev, "no PCLK\n");
+ return PTR_ERR(port->pclk);
+ }
+ ret = clk_prepare_enable(port->pclk);
+ if (ret)
+ return ret;
+
+ /* Maybe there is a nice ethernet address we should use */
+ gemini_port_save_mac_addr(port);
+
+ /* Reset the port */
+ port->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(port->reset)) {
+ dev_err(dev, "no reset\n");
+ return PTR_ERR(port->reset);
+ }
+ reset_control_reset(port->reset);
+ usleep_range(100, 500);
+
+ /* Assign pointer in the main state container */
+ if (!id)
+ geth->port0 = port;
+ else
+ geth->port1 = port;
+ platform_set_drvdata(pdev, port);
+
+ /* Set up and register the netdev */
+ netdev->dev_id = port->id;
+ netdev->irq = irq;
+ netdev->netdev_ops = &gmac_351x_ops;
+ netdev->ethtool_ops = &gmac_351x_ethtool_ops;
+
+ spin_lock_init(&port->config_lock);
+ gmac_clear_hw_stats(netdev);
+
+ netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+
+ port->freeq_refill = 0;
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll,
+ DEFAULT_NAPI_WEIGHT);
+
+ if (is_valid_ether_addr((void *)port->mac_addr)) {
+ memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ } else {
+ dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
+ port->mac_addr[0], port->mac_addr[1],
+ port->mac_addr[2]);
+ dev_info(dev, "using a random ethernet address\n");
+ random_ether_addr(netdev->dev_addr);
+ }
+ gmac_write_mac_address(netdev);
+
+ ret = devm_request_threaded_irq(port->dev,
+ port->irq,
+ gemini_port_irq,
+ gemini_port_irq_thread,
+ IRQF_SHARED,
+ port_names[port->id],
+ port);
+ if (ret)
+ return ret;
+
+ ret = register_netdev(netdev);
+ if (!ret) {
+ netdev_info(netdev,
+ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+ port->irq, &dmares->start,
+ &gmacres->start);
+ ret = gmac_setup_phy(netdev);
+ if (ret)
+ netdev_info(netdev,
+ "PHY init failed, deferring to ifup time\n");
+ return 0;
+ }
+
+ port->netdev = NULL;
+ free_netdev(netdev);
+ return ret;
+}
+
+static int gemini_ethernet_port_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
+
+ gemini_port_remove(port);
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_port_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
+
+static struct platform_driver gemini_ethernet_port_driver = {
+ .driver = {
+ .name = "gemini-ethernet-port",
+ .of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+ },
+ .probe = gemini_ethernet_port_probe,
+ .remove = gemini_ethernet_port_remove,
+};
+
+static int gemini_ethernet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ unsigned int retry = 5;
+ struct resource *res;
+ u32 val;
+
+ /* Global registers */
+ geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
+ if (!geth)
+ return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ geth->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(geth->base))
+ return PTR_ERR(geth->base);
+ geth->dev = dev;
+
+ /* Wait for ports to stabilize */
+ do {
+ udelay(2);
+ val = readl(geth->base + GLOBAL_TOE_VERSION_REG);
+ barrier();
+ } while (!val && --retry);
+ if (!retry) {
+ dev_err(dev, "failed to reset ethernet\n");
+ return -EIO;
+ }
+ dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
+ (val >> 4) & 0xFFFU, val & 0xFU);
+
+ spin_lock_init(&geth->irq_lock);
+ spin_lock_init(&geth->freeq_lock);
+ gemini_ethernet_init(geth);
+
+ /* The children will use this */
+ platform_set_drvdata(pdev, geth);
+
+ /* Spawn child devices for the two ports */
+ return devm_of_platform_populate(dev);
+}
+
+static int gemini_ethernet_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet *geth = platform_get_drvdata(pdev);
+
+ gemini_ethernet_init(geth);
+ geth_cleanup_freeq(geth);
+
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
+
+static struct platform_driver gemini_ethernet_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(gemini_ethernet_of_match),
+ },
+ .probe = gemini_ethernet_probe,
+ .remove = gemini_ethernet_remove,
+};
+
+static int __init gemini_ethernet_module_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&gemini_ethernet_port_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&gemini_ethernet_driver);
+ if (ret) {
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(gemini_ethernet_module_init);
+
+static void __exit gemini_ethernet_module_exit(void)
+{
+ platform_driver_unregister(&gemini_ethernet_driver);
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+}
+module_exit(gemini_ethernet_module_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
new file mode 100644
index 000000000000..0b12f89bf89a
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Register definitions for Gemini GMAC Ethernet device driver
+ *
+ * Copyright (C) 2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2010 Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ */
+#ifndef _GEMINI_ETHERNET_H
+#define _GEMINI_ETHERNET_H
+
+#include <linux/bitops.h>
+
+/* Base Registers */
+#define TOE_NONTOE_QUE_HDR_BASE 0x2000
+#define TOE_TOE_QUE_HDR_BASE 0x3000
+
+/* Queue ID */
+#define TOE_SW_FREE_QID 0x00
+#define TOE_HW_FREE_QID 0x01
+#define TOE_GMAC0_SW_TXQ0_QID 0x02
+#define TOE_GMAC0_SW_TXQ1_QID 0x03
+#define TOE_GMAC0_SW_TXQ2_QID 0x04
+#define TOE_GMAC0_SW_TXQ3_QID 0x05
+#define TOE_GMAC0_SW_TXQ4_QID 0x06
+#define TOE_GMAC0_SW_TXQ5_QID 0x07
+#define TOE_GMAC0_HW_TXQ0_QID 0x08
+#define TOE_GMAC0_HW_TXQ1_QID 0x09
+#define TOE_GMAC0_HW_TXQ2_QID 0x0A
+#define TOE_GMAC0_HW_TXQ3_QID 0x0B
+#define TOE_GMAC1_SW_TXQ0_QID 0x12
+#define TOE_GMAC1_SW_TXQ1_QID 0x13
+#define TOE_GMAC1_SW_TXQ2_QID 0x14
+#define TOE_GMAC1_SW_TXQ3_QID 0x15
+#define TOE_GMAC1_SW_TXQ4_QID 0x16
+#define TOE_GMAC1_SW_TXQ5_QID 0x17
+#define TOE_GMAC1_HW_TXQ0_QID 0x18
+#define TOE_GMAC1_HW_TXQ1_QID 0x19
+#define TOE_GMAC1_HW_TXQ2_QID 0x1A
+#define TOE_GMAC1_HW_TXQ3_QID 0x1B
+#define TOE_GMAC0_DEFAULT_QID 0x20
+#define TOE_GMAC1_DEFAULT_QID 0x21
+#define TOE_CLASSIFICATION_QID(x)	(0x22 + (x))	/* 0x22 ~ 0x2F */
+#define TOE_TOE_QID(x)		(0x40 + (x))	/* 0x40 ~ 0x7F */
+
+/* TOE DMA Queue Size should be 2^n, n = 6...12
+ * TOE DMA Queues are the following queue types:
+ * SW Free Queue, HW Free Queue,
+ * GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-5
+ * The base address and descriptor number are configured at
+ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
+ */
+#define GET_WPTR(addr) readw((addr) + 2)
+#define GET_RPTR(addr) readw((addr))
+#define SET_WPTR(addr, data) writew((data), (addr) + 2)
+#define SET_RPTR(addr, data) writew((data), (addr))
+#define __RWPTR_NEXT(x, mask) (((unsigned int)(x) + 1) & (mask))
+#define __RWPTR_PREV(x, mask) (((unsigned int)(x) - 1) & (mask))
+#define __RWPTR_DISTANCE(r, w, mask) (((unsigned int)(w) - (r)) & (mask))
+#define __RWPTR_MASK(order) ((1 << (order)) - 1)
+#define RWPTR_NEXT(x, order) __RWPTR_NEXT((x), __RWPTR_MASK((order)))
+#define RWPTR_PREV(x, order) __RWPTR_PREV((x), __RWPTR_MASK((order)))
+#define RWPTR_DISTANCE(r, w, order) __RWPTR_DISTANCE((r), (w), \
+ __RWPTR_MASK((order)))
+
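+/* Worked example (illustrative only): with order 6 the ring holds
+ * 64 descriptors and __RWPTR_MASK(6) == 0x3f, so RWPTR_NEXT(63, 6)
+ * wraps to 0 and RWPTR_DISTANCE(60, 3, 6) == 7 descriptors pending.
+ */
+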
+/* Global registers */
+#define GLOBAL_TOE_VERSION_REG 0x0000
+#define GLOBAL_SW_FREEQ_BASE_SIZE_REG 0x0004
+#define GLOBAL_HW_FREEQ_BASE_SIZE_REG 0x0008
+#define GLOBAL_DMA_SKB_SIZE_REG 0x0010
+#define GLOBAL_SWFQ_RWPTR_REG 0x0014
+#define GLOBAL_HWFQ_RWPTR_REG 0x0018
+#define GLOBAL_INTERRUPT_STATUS_0_REG 0x0020
+#define GLOBAL_INTERRUPT_ENABLE_0_REG 0x0024
+#define GLOBAL_INTERRUPT_SELECT_0_REG 0x0028
+#define GLOBAL_INTERRUPT_STATUS_1_REG 0x0030
+#define GLOBAL_INTERRUPT_ENABLE_1_REG 0x0034
+#define GLOBAL_INTERRUPT_SELECT_1_REG 0x0038
+#define GLOBAL_INTERRUPT_STATUS_2_REG 0x0040
+#define GLOBAL_INTERRUPT_ENABLE_2_REG 0x0044
+#define GLOBAL_INTERRUPT_SELECT_2_REG 0x0048
+#define GLOBAL_INTERRUPT_STATUS_3_REG 0x0050
+#define GLOBAL_INTERRUPT_ENABLE_3_REG 0x0054
+#define GLOBAL_INTERRUPT_SELECT_3_REG 0x0058
+#define GLOBAL_INTERRUPT_STATUS_4_REG 0x0060
+#define GLOBAL_INTERRUPT_ENABLE_4_REG 0x0064
+#define GLOBAL_INTERRUPT_SELECT_4_REG 0x0068
+#define GLOBAL_HASH_TABLE_BASE_REG 0x006C
+#define GLOBAL_QUEUE_THRESHOLD_REG 0x0070
+
+/* GMAC 0/1 DMA/TOE register */
+#define GMAC_DMA_CTRL_REG 0x0000
+#define GMAC_TX_WEIGHTING_CTRL_0_REG 0x0004
+#define GMAC_TX_WEIGHTING_CTRL_1_REG 0x0008
+#define GMAC_SW_TX_QUEUE0_PTR_REG 0x000C
+#define GMAC_SW_TX_QUEUE1_PTR_REG 0x0010
+#define GMAC_SW_TX_QUEUE2_PTR_REG 0x0014
+#define GMAC_SW_TX_QUEUE3_PTR_REG 0x0018
+#define GMAC_SW_TX_QUEUE4_PTR_REG 0x001C
+#define GMAC_SW_TX_QUEUE5_PTR_REG 0x0020
+#define GMAC_SW_TX_QUEUE_PTR_REG(i) (GMAC_SW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_HW_TX_QUEUE0_PTR_REG 0x0024
+#define GMAC_HW_TX_QUEUE1_PTR_REG 0x0028
+#define GMAC_HW_TX_QUEUE2_PTR_REG 0x002C
+#define GMAC_HW_TX_QUEUE3_PTR_REG 0x0030
+#define GMAC_HW_TX_QUEUE_PTR_REG(i) (GMAC_HW_TX_QUEUE0_PTR_REG + 4 * (i))
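+/* For example (illustrative), GMAC_SW_TX_QUEUE_PTR_REG(5) resolves to
+ * 0x000C + 4 * 5 == 0x0020, i.e. GMAC_SW_TX_QUEUE5_PTR_REG.
+ */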
+#define GMAC_DMA_TX_FIRST_DESC_REG 0x0038
+#define GMAC_DMA_TX_CURR_DESC_REG 0x003C
+#define GMAC_DMA_TX_DESC_WORD0_REG 0x0040
+#define GMAC_DMA_TX_DESC_WORD1_REG 0x0044
+#define GMAC_DMA_TX_DESC_WORD2_REG 0x0048
+#define GMAC_DMA_TX_DESC_WORD3_REG 0x004C
+#define GMAC_SW_TX_QUEUE_BASE_REG 0x0050
+#define GMAC_HW_TX_QUEUE_BASE_REG 0x0054
+#define GMAC_DMA_RX_FIRST_DESC_REG 0x0058
+#define GMAC_DMA_RX_CURR_DESC_REG 0x005C
+#define GMAC_DMA_RX_DESC_WORD0_REG 0x0060
+#define GMAC_DMA_RX_DESC_WORD1_REG 0x0064
+#define GMAC_DMA_RX_DESC_WORD2_REG 0x0068
+#define GMAC_DMA_RX_DESC_WORD3_REG 0x006C
+#define GMAC_HASH_ENGINE_REG0 0x0070
+#define GMAC_HASH_ENGINE_REG1 0x0074
+/* matching rule 0 Control register 0 */
+#define GMAC_MR0CR0 0x0078
+#define GMAC_MR0CR1 0x007C
+#define GMAC_MR0CR2 0x0080
+#define GMAC_MR1CR0 0x0084
+#define GMAC_MR1CR1 0x0088
+#define GMAC_MR1CR2 0x008C
+#define GMAC_MR2CR0 0x0090
+#define GMAC_MR2CR1 0x0094
+#define GMAC_MR2CR2 0x0098
+#define GMAC_MR3CR0 0x009C
+#define GMAC_MR3CR1 0x00A0
+#define GMAC_MR3CR2 0x00A4
+/* Support Protocol Register 0 */
+#define GMAC_SPR0 0x00A8
+#define GMAC_SPR1 0x00AC
+#define GMAC_SPR2 0x00B0
+#define GMAC_SPR3 0x00B4
+#define GMAC_SPR4 0x00B8
+#define GMAC_SPR5 0x00BC
+#define GMAC_SPR6 0x00C0
+#define GMAC_SPR7 0x00C4
+/* GMAC Hash/Rx/Tx AHB Weighting register */
+#define GMAC_AHB_WEIGHT_REG 0x00C8
+
+/* TOE GMAC 0/1 register */
+#define GMAC_STA_ADD0 0x0000
+#define GMAC_STA_ADD1 0x0004
+#define GMAC_STA_ADD2 0x0008
+#define GMAC_RX_FLTR 0x000c
+#define GMAC_MCAST_FIL0 0x0010
+#define GMAC_MCAST_FIL1 0x0014
+#define GMAC_CONFIG0 0x0018
+#define GMAC_CONFIG1 0x001c
+#define GMAC_CONFIG2 0x0020
+#define GMAC_CONFIG3 0x0024
+#define GMAC_RESERVED 0x0028
+#define GMAC_STATUS 0x002c
+#define GMAC_IN_DISCARDS 0x0030
+#define GMAC_IN_ERRORS 0x0034
+#define GMAC_IN_MCAST 0x0038
+#define GMAC_IN_BCAST 0x003c
+#define GMAC_IN_MAC1 0x0040 /* for STA 1 MAC Address */
+#define GMAC_IN_MAC2 0x0044 /* for STA 2 MAC Address */
+
+#define RX_STATS_NUM 6
+
+/* DMA Queues description Ring Base Address/Size Register (offset 0x0004) */
+union dma_q_base_size {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define DMA_Q_BASE_MASK (~0x0f)
+
+/* DMA SKB Buffer register (offset 0x0008) */
+union dma_skb_size {
+ unsigned int bits32;
+ struct bit_0008 {
+		unsigned int sw_skb_size : 16;	/* SW Free pool SKB Size */
+		unsigned int hw_skb_size : 16;	/* HW Free pool SKB Size */
+ } bits;
+};
+
+/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c) */
+union dma_rwptr {
+ unsigned int bits32;
+ struct bit_000c {
+ unsigned int rptr : 16; /* Read Ptr, RO */
+ unsigned int wptr : 16; /* Write Ptr, RW */
+ } bits;
+};
+
+/* Interrupt Status Register 0 (offset 0x0020)
+ * Interrupt Mask Register 0 (offset 0x0024)
+ * Interrupt Select Register 0 (offset 0x0028)
+ */
+#define GMAC1_TXDERR_INT_BIT BIT(31)
+#define GMAC1_TXPERR_INT_BIT BIT(30)
+#define GMAC0_TXDERR_INT_BIT BIT(29)
+#define GMAC0_TXPERR_INT_BIT BIT(28)
+#define GMAC1_RXDERR_INT_BIT BIT(27)
+#define GMAC1_RXPERR_INT_BIT BIT(26)
+#define GMAC0_RXDERR_INT_BIT BIT(25)
+#define GMAC0_RXPERR_INT_BIT BIT(24)
+#define GMAC1_SWTQ15_FIN_INT_BIT BIT(23)
+#define GMAC1_SWTQ14_FIN_INT_BIT BIT(22)
+#define GMAC1_SWTQ13_FIN_INT_BIT BIT(21)
+#define GMAC1_SWTQ12_FIN_INT_BIT BIT(20)
+#define GMAC1_SWTQ11_FIN_INT_BIT BIT(19)
+#define GMAC1_SWTQ10_FIN_INT_BIT BIT(18)
+#define GMAC0_SWTQ05_FIN_INT_BIT BIT(17)
+#define GMAC0_SWTQ04_FIN_INT_BIT BIT(16)
+#define GMAC0_SWTQ03_FIN_INT_BIT BIT(15)
+#define GMAC0_SWTQ02_FIN_INT_BIT BIT(14)
+#define GMAC0_SWTQ01_FIN_INT_BIT BIT(13)
+#define GMAC0_SWTQ00_FIN_INT_BIT BIT(12)
+#define GMAC1_SWTQ15_EOF_INT_BIT BIT(11)
+#define GMAC1_SWTQ14_EOF_INT_BIT BIT(10)
+#define GMAC1_SWTQ13_EOF_INT_BIT BIT(9)
+#define GMAC1_SWTQ12_EOF_INT_BIT BIT(8)
+#define GMAC1_SWTQ11_EOF_INT_BIT BIT(7)
+#define GMAC1_SWTQ10_EOF_INT_BIT BIT(6)
+#define GMAC0_SWTQ05_EOF_INT_BIT BIT(5)
+#define GMAC0_SWTQ04_EOF_INT_BIT BIT(4)
+#define GMAC0_SWTQ03_EOF_INT_BIT BIT(3)
+#define GMAC0_SWTQ02_EOF_INT_BIT BIT(2)
+#define GMAC0_SWTQ01_EOF_INT_BIT BIT(1)
+#define GMAC0_SWTQ00_EOF_INT_BIT BIT(0)
+
+/* Interrupt Status Register 1 (offset 0x0030)
+ * Interrupt Mask Register 1 (offset 0x0034)
+ * Interrupt Select Register 1 (offset 0x0038)
+ */
+#define TOE_IQ3_FULL_INT_BIT BIT(31)
+#define TOE_IQ2_FULL_INT_BIT BIT(30)
+#define TOE_IQ1_FULL_INT_BIT BIT(29)
+#define TOE_IQ0_FULL_INT_BIT BIT(28)
+#define TOE_IQ3_INT_BIT BIT(27)
+#define TOE_IQ2_INT_BIT BIT(26)
+#define TOE_IQ1_INT_BIT BIT(25)
+#define TOE_IQ0_INT_BIT BIT(24)
+#define GMAC1_HWTQ13_EOF_INT_BIT BIT(23)
+#define GMAC1_HWTQ12_EOF_INT_BIT BIT(22)
+#define GMAC1_HWTQ11_EOF_INT_BIT BIT(21)
+#define GMAC1_HWTQ10_EOF_INT_BIT BIT(20)
+#define GMAC0_HWTQ03_EOF_INT_BIT BIT(19)
+#define GMAC0_HWTQ02_EOF_INT_BIT BIT(18)
+#define GMAC0_HWTQ01_EOF_INT_BIT BIT(17)
+#define GMAC0_HWTQ00_EOF_INT_BIT BIT(16)
+#define CLASS_RX_INT_BIT(x) BIT((x + 2))
+#define DEFAULT_Q1_INT_BIT BIT(1)
+#define DEFAULT_Q0_INT_BIT BIT(0)
+
+#define TOE_IQ_INT_BITS (TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \
+ TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT)
+#define TOE_IQ_FULL_BITS (TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \
+ TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT)
+#define TOE_IQ_ALL_BITS (TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS)
+#define TOE_CLASS_RX_INT_BITS 0xfffc
+
+/* Interrupt Status Register 2 (offset 0x0040)
+ * Interrupt Mask Register 2 (offset 0x0044)
+ * Interrupt Select Register 2 (offset 0x0048)
+ */
+#define TOE_QL_FULL_INT_BIT(x) BIT(x)
+
+/* Interrupt Status Register 3 (offset 0x0050)
+ * Interrupt Mask Register 3 (offset 0x0054)
+ * Interrupt Select Register 3 (offset 0x0058)
+ */
+#define TOE_QH_FULL_INT_BIT(x) BIT(x - 32)
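+/* E.g. (illustrative): TOE queue 40 maps to TOE_QH_FULL_INT_BIT(40),
+ * i.e. bit 8 of Interrupt Status Register 3.
+ */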
+
+/* Interrupt Status Register 4 (offset 0x0060)
+ * Interrupt Mask Register 4 (offset 0x0064)
+ * Interrupt Select Register 4 (offset 0x0068)
+ */
+#define GMAC1_RESERVED_INT_BIT BIT(31)
+#define GMAC1_MIB_INT_BIT BIT(30)
+#define GMAC1_RX_PAUSE_ON_INT_BIT BIT(29)
+#define GMAC1_TX_PAUSE_ON_INT_BIT BIT(28)
+#define GMAC1_RX_PAUSE_OFF_INT_BIT BIT(27)
+#define GMAC1_TX_PAUSE_OFF_INT_BIT BIT(26)
+#define GMAC1_RX_OVERRUN_INT_BIT BIT(25)
+#define GMAC1_STATUS_CHANGE_INT_BIT BIT(24)
+#define GMAC0_RESERVED_INT_BIT BIT(23)
+#define GMAC0_MIB_INT_BIT BIT(22)
+#define GMAC0_RX_PAUSE_ON_INT_BIT BIT(21)
+#define GMAC0_TX_PAUSE_ON_INT_BIT BIT(20)
+#define GMAC0_RX_PAUSE_OFF_INT_BIT BIT(19)
+#define GMAC0_TX_PAUSE_OFF_INT_BIT BIT(18)
+#define GMAC0_RX_OVERRUN_INT_BIT BIT(17)
+#define GMAC0_STATUS_CHANGE_INT_BIT BIT(16)
+#define CLASS_RX_FULL_INT_BIT(x) BIT(x + 2)
+#define HWFQ_EMPTY_INT_BIT BIT(1)
+#define SWFQ_EMPTY_INT_BIT BIT(0)
+
+#define GMAC0_INT_BITS (GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \
+ GMAC0_RX_PAUSE_ON_INT_BIT | \
+ GMAC0_TX_PAUSE_ON_INT_BIT | \
+ GMAC0_RX_PAUSE_OFF_INT_BIT | \
+ GMAC0_TX_PAUSE_OFF_INT_BIT | \
+ GMAC0_RX_OVERRUN_INT_BIT | \
+ GMAC0_STATUS_CHANGE_INT_BIT)
+#define GMAC1_INT_BITS (GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \
+ GMAC1_RX_PAUSE_ON_INT_BIT | \
+ GMAC1_TX_PAUSE_ON_INT_BIT | \
+ GMAC1_RX_PAUSE_OFF_INT_BIT | \
+ GMAC1_TX_PAUSE_OFF_INT_BIT | \
+ GMAC1_RX_OVERRUN_INT_BIT | \
+ GMAC1_STATUS_CHANGE_INT_BIT)
+
+#define CLASS_RX_FULL_INT_BITS 0xfffc
+
+/* GLOBAL_QUEUE_THRESHOLD_REG (offset 0x0070) */
+union queue_threshold {
+ unsigned int bits32;
+ struct bit_0070_2 {
+ /* 7:0 Software Free Queue Empty Threshold */
+ unsigned int swfq_empty:8;
+ /* 15:8 Hardware Free Queue Empty Threshold */
+ unsigned int hwfq_empty:8;
+ /* 23:16 */
+ unsigned int intrq:8;
+ /* 31:24 */
+ unsigned int toe_class:8;
+ } bits;
+};
+
+/* GMAC DMA Control Register
+ * GMAC0 offset 0x8000
+ * GMAC1 offset 0xC000
+ */
+union gmac_dma_ctrl {
+ unsigned int bits32;
+ struct bit_8000 {
+ /* bit 1:0 Peripheral Bus Width */
+ unsigned int td_bus:2;
+ /* bit 3:2 TxDMA max burst size for every AHB request */
+ unsigned int td_burst_size:2;
+ /* bit 7:4 TxDMA protection control */
+ unsigned int td_prot:4;
+ /* bit 9:8 Peripheral Bus Width */
+ unsigned int rd_bus:2;
+ /* bit 11:10 DMA max burst size for every AHB request */
+ unsigned int rd_burst_size:2;
+ /* bit 15:12 DMA Protection Control */
+ unsigned int rd_prot:4;
+ /* bit 17:16 */
+ unsigned int rd_insert_bytes:2;
+ /* bit 27:18 */
+ unsigned int reserved:10;
+ /* bit 28 1: Drop, 0: Accept */
+ unsigned int drop_small_ack:1;
+ /* bit 29 Loopback TxDMA to RxDMA */
+ unsigned int loopback:1;
+ /* bit 30 Tx DMA Enable */
+ unsigned int td_enable:1;
+ /* bit 31 Rx DMA Enable */
+ unsigned int rd_enable:1;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 0
+ * GMAC0 offset 0x8004
+ * GMAC1 offset 0xC004
+ */
+union gmac_tx_wcr0 {
+ unsigned int bits32;
+ struct bit_8004 {
+ /* bit 5:0 HW TX Queue 3 */
+ unsigned int hw_tq0:6;
+ /* bit 11:6 HW TX Queue 2 */
+ unsigned int hw_tq1:6;
+ /* bit 17:12 HW TX Queue 1 */
+ unsigned int hw_tq2:6;
+ /* bit 23:18 HW TX Queue 0 */
+ unsigned int hw_tq3:6;
+ /* bit 31:24 */
+ unsigned int reserved:8;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 1
+ * GMAC0 offset 0x8008
+ * GMAC1 offset 0xC008
+ */
+union gmac_tx_wcr1 {
+ unsigned int bits32;
+ struct bit_8008 {
+ /* bit 4:0 SW TX Queue 0 */
+ unsigned int sw_tq0:5;
+ /* bit 9:5 SW TX Queue 1 */
+ unsigned int sw_tq1:5;
+ /* bit 14:10 SW TX Queue 2 */
+ unsigned int sw_tq2:5;
+ /* bit 19:15 SW TX Queue 3 */
+ unsigned int sw_tq3:5;
+ /* bit 24:20 SW TX Queue 4 */
+ unsigned int sw_tq4:5;
+ /* bit 29:25 SW TX Queue 5 */
+ unsigned int sw_tq5:5;
+ /* bit 31:30 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 0 Register
+ * GMAC0 offset 0x8040
+ * GMAC1 offset 0xC040
+ */
+union gmac_txdesc_0 {
+ unsigned int bits32;
+ struct bit_8040 {
+ /* bit 15:0 Transfer size */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+ /* bit 22 Tx Status, 1: Successful 0: Failed */
+ unsigned int status_tx_ok:1;
+ /* bit 28:23 Tx Status, Reserved bits */
+ unsigned int status_rvd:6;
+ /* bit 29 protocol error during processing this descriptor */
+ unsigned int perr:1;
+ /* bit 30 data error during processing this descriptor */
+ unsigned int derr:1;
+ /* bit 31 */
+ unsigned int reserved:1;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 1 Register
+ * GMAC0 offset 0x8044
+ * GMAC1 offset 0xC044
+ */
+union gmac_txdesc_1 {
+ unsigned int bits32;
+ struct txdesc_word1 {
+ /* bit 15: 0 Tx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 16 TSS segmentation use MTU setting */
+ unsigned int mtu_enable:1;
+ /* bit 17 IPV4 Header Checksum Enable */
+ unsigned int ip_chksum:1;
+ /* bit 18 IPV6 Tx Enable */
+ unsigned int ipv6_enable:1;
+ /* bit 19 TCP Checksum Enable */
+ unsigned int tcp_chksum:1;
+ /* bit 20 UDP Checksum Enable */
+ unsigned int udp_chksum:1;
+ /* bit 21 Bypass HW offload engine */
+ unsigned int bypass_tss:1;
+ /* bit 22 Don't update IP length field */
+ unsigned int ip_fixed_len:1;
+ /* bit 31:23 Tx Flag, Reserved */
+ unsigned int reserved:9;
+ } bits;
+};
+
+#define TSS_IP_FIXED_LEN_BIT BIT(22)
+#define TSS_BYPASS_BIT BIT(21)
+#define TSS_UDP_CHKSUM_BIT BIT(20)
+#define TSS_TCP_CHKSUM_BIT BIT(19)
+#define TSS_IPV6_ENABLE_BIT BIT(18)
+#define TSS_IP_CHKSUM_BIT BIT(17)
+#define TSS_MTU_ENABLE_BIT BIT(16)
+
+#define TSS_CHECKUM_ENABLE \
+ (TSS_IP_CHKSUM_BIT | TSS_IPV6_ENABLE_BIT | \
+ TSS_TCP_CHKSUM_BIT | TSS_UDP_CHKSUM_BIT)
+
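+/* These masks mirror the txdesc_word1 bitfield above; e.g.
+ * TSS_TCP_CHKSUM_BIT == BIT(19) matches tcp_chksum at bit 19.
+ */
+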
+/* GMAC DMA Tx Description Word 2 Register
+ * GMAC0 offset 0x8048
+ * GMAC1 offset 0xC048
+ */
+union gmac_txdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+/* GMAC DMA Tx Description Word 3 Register
+ * GMAC0 offset 0x804C
+ * GMAC1 offset 0xC04C
+ */
+union gmac_txdesc_3 {
+ unsigned int bits32;
+ struct txdesc_word3 {
+		/* bit 12:0 MTU size */
+ unsigned int mtu_size:13;
+ /* bit 28:13 */
+ unsigned int reserved:16;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+#define SOF_EOF_BIT_MASK 0x3fffffff
+#define SOF_BIT 0x80000000
+#define EOF_BIT 0x40000000
+#define EOFIE_BIT BIT(29)
+#define MTU_SIZE_BIT_MASK 0x1fff
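+
+/* Chaining example (illustrative only): a frame spanning three
+ * descriptors is tagged SOF_BIT (10) on the first, neither bit
+ * (00, "linking") on the middle one and EOF_BIT (01) on the last;
+ * a frame that fits one descriptor carries both (11, "only one").
+ */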
+
+/* GMAC Tx Descriptor */
+struct gmac_txdesc {
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ union gmac_txdesc_2 word2;
+ union gmac_txdesc_3 word3;
+};
+
+/* GMAC DMA Rx Description Word 0 Register
+ * GMAC0 offset 0x8060
+ * GMAC1 offset 0xC060
+ */
+union gmac_rxdesc_0 {
+ unsigned int bits32;
+ struct bit_8060 {
+		/* bit 15:0 buffer size */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+		/* bit 25:22 Status of rx frame */
+ unsigned int status:4;
+ /* bit 28:26 Check Sum Status */
+ unsigned int chksum_status:3;
+ /* bit 29 protocol error during processing this descriptor */
+ unsigned int perr:1;
+ /* bit 30 data error during processing this descriptor */
+ unsigned int derr:1;
+ /* bit 31 TOE/CIS Queue Full dropped packet to default queue */
+ unsigned int drop:1;
+ } bits;
+};
+
+#define GMAC_RXDESC_0_T_derr BIT(30)
+#define GMAC_RXDESC_0_T_perr BIT(29)
+#define GMAC_RXDESC_0_T_chksum_status(x) BIT(x + 26)
+#define GMAC_RXDESC_0_T_status(x) BIT(x + 22)
+#define GMAC_RXDESC_0_T_desc_count(x) BIT(x + 16)
+
+#define RX_CHKSUM_IP_UDP_TCP_OK 0
+#define RX_CHKSUM_IP_OK_ONLY 1
+#define RX_CHKSUM_NONE 2
+#define RX_CHKSUM_IP_ERR_UNKNOWN 4
+#define RX_CHKSUM_IP_ERR 5
+#define RX_CHKSUM_TCP_UDP_ERR 6
+#define RX_CHKSUM_NUM 8
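+
+/* Note (illustrative, not from the original patch): RX_CHKSUM_NUM
+ * sizes the per-status counter array (port->rx_csum_stats) exposed
+ * through ethtool; status 0 means both the IP and TCP/UDP checksums
+ * were verified by hardware, while 4..6 flag the error cases.
+ */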
+
+#define RX_STATUS_GOOD_FRAME 0
+#define RX_STATUS_TOO_LONG_GOOD_CRC 1
+#define RX_STATUS_RUNT_FRAME 2
+#define RX_STATUS_SFD_NOT_FOUND 3
+#define RX_STATUS_CRC_ERROR 4
+#define RX_STATUS_TOO_LONG_BAD_CRC 5
+#define RX_STATUS_ALIGNMENT_ERROR 6
+#define RX_STATUS_TOO_LONG_BAD_ALIGN 7
+#define RX_STATUS_RX_ERR 8
+#define RX_STATUS_DA_FILTERED 9
+#define RX_STATUS_BUFFER_FULL 10
+#define RX_STATUS_NUM 16
+
+#define RX_ERROR_LENGTH(s) \
+ ((s) == RX_STATUS_TOO_LONG_GOOD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_OVER(s) \
+ ((s) == RX_STATUS_BUFFER_FULL)
+#define RX_ERROR_CRC(s) \
+ ((s) == RX_STATUS_CRC_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC)
+#define RX_ERROR_FRAME(s) \
+ ((s) == RX_STATUS_ALIGNMENT_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_FIFO(s) \
+ (0)
+
+/* GMAC DMA Rx Description Word 1 Register
+ * GMAC0 offset 0x8064
+ * GMAC1 offset 0xC064
+ */
+union gmac_rxdesc_1 {
+ unsigned int bits32;
+ struct rxdesc_word1 {
+ /* bit 15: 0 Rx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 31:16 Software ID */
+ unsigned int sw_id:16;
+ } bits;
+};
+
+/* GMAC DMA Rx Description Word 2 Register
+ * GMAC0 offset 0x8068
+ * GMAC1 offset 0xC068
+ */
+union gmac_rxdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+#define RX_INSERT_NONE 0
+#define RX_INSERT_1_BYTE 1
+#define RX_INSERT_2_BYTE 2
+#define RX_INSERT_3_BYTE 3
+
+/* GMAC DMA Rx Description Word 3 Register
+ * GMAC0 offset 0x806C
+ * GMAC1 offset 0xC06C
+ */
+union gmac_rxdesc_3 {
+ unsigned int bits32;
+ struct rxdesc_word3 {
+ /* bit 7: 0 L3 data offset */
+ unsigned int l3_offset:8;
+ /* bit 15: 8 L4 data offset */
+ unsigned int l4_offset:8;
+ /* bit 23: 16 L7 data offset */
+ unsigned int l7_offset:8;
+ /* bit 24 Duplicated ACK detected */
+ unsigned int dup_ack:1;
+ /* bit 25 abnormal case found */
+ unsigned int abnormal:1;
+ /* bit 26 IPV4 option or IPV6 extension header */
+ unsigned int option:1;
+ /* bit 27 Out of Sequence packet */
+ unsigned int out_of_seq:1;
+ /* bit 28 Control Flag is present */
+ unsigned int ctrl_flag:1;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+/* GMAC Rx Descriptor; this is simply fitted over the queue registers */
+struct gmac_rxdesc {
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_2 word2;
+ union gmac_rxdesc_3 word3;
+};
+
+/* GMAC Matching Rule Control Register 0
+ * GMAC0 offset 0x8078
+ * GMAC1 offset 0xC078
+ */
+#define MR_L2_BIT BIT(31)
+#define MR_L3_BIT BIT(30)
+#define MR_L4_BIT BIT(29)
+#define MR_L7_BIT BIT(28)
+#define MR_PORT_BIT BIT(27)
+#define MR_PRIORITY_BIT BIT(26)
+#define MR_DA_BIT BIT(23)
+#define MR_SA_BIT BIT(22)
+#define MR_ETHER_TYPE_BIT BIT(21)
+#define MR_VLAN_BIT BIT(20)
+#define MR_PPPOE_BIT BIT(19)
+#define MR_IP_VER_BIT BIT(15)
+#define MR_IP_HDR_LEN_BIT BIT(14)
+#define MR_FLOW_LABLE_BIT BIT(13)
+#define MR_TOS_TRAFFIC_BIT BIT(12)
+#define MR_SPR_BIT(x) BIT(x)
+#define MR_SPR_BITS 0xff
+
+/* GMAC_AHB_WEIGHT registers
+ * GMAC0 offset 0x80C8
+ * GMAC1 offset 0xC0C8
+ */
+union gmac_ahb_weight {
+ unsigned int bits32;
+ struct bit_80C8 {
+ /* 4:0 */
+ unsigned int hash_weight:5;
+ /* 9:5 */
+ unsigned int rx_weight:5;
+ /* 14:10 */
+ unsigned int tx_weight:5;
+ /* 19:15 Rx Data Pre Request FIFO Threshold */
+ unsigned int pre_req:5;
+ /* 24:20 DMA TqCtrl to Start tqDV FIFO Threshold */
+ unsigned int tq_dv_threshold:5;
+ /* 31:25 */
+ unsigned int reserved:7;
+ } bits;
+};
+
+/* GMAC RX FLTR
+ * GMAC0 Offset 0xA00C
+ * GMAC1 Offset 0xE00C
+ */
+union gmac_rx_fltr {
+ unsigned int bits32;
+ struct bit1_000c {
+		/* Enable receive of unicast frames that are sent to the
+		 * STA address
+		 */
+ unsigned int unicast:1;
+		/* Enable receive of multicast frames that pass the
+		 * multicast filter
+		 */
+ unsigned int multicast:1;
+ /* Enable receive of broadcast frames */
+ unsigned int broadcast:1;
+ /* Enable receive of all frames */
+ unsigned int promiscuous:1;
+ /* Enable receive of all error frames */
+ unsigned int error:1;
+ unsigned int reserved:27;
+ } bits;
+};
+
+/* GMAC Configuration 0
+ * GMAC0 Offset 0xA018
+ * GMAC1 Offset 0xE018
+ */
+union gmac_config0 {
+ unsigned int bits32;
+ struct bit1_0018 {
+ /* 0: disable transmit */
+ unsigned int dis_tx:1;
+ /* 1: disable receive */
+ unsigned int dis_rx:1;
+ /* 2: transmit data loopback enable */
+ unsigned int loop_back:1;
+		/* 3: flow control also triggered by Rx queues */
+ unsigned int flow_ctrl:1;
+ /* 4-7: adjust IFG from 96+/-56 */
+ unsigned int adj_ifg:4;
+ /* 8-10 maximum receive frame length allowed */
+ unsigned int max_len:3;
+ /* 11: disable back-off function */
+ unsigned int dis_bkoff:1;
+ /* 12: disable 16 collisions abort function */
+ unsigned int dis_col:1;
+ /* 13: speed up timers in simulation */
+ unsigned int sim_test:1;
+ /* 14: RX flow control enable */
+ unsigned int rx_fc_en:1;
+ /* 15: TX flow control enable */
+ unsigned int tx_fc_en:1;
+ /* 16: RGMII in-band status enable */
+ unsigned int rgmii_en:1;
+ /* 17: IPv4 RX Checksum enable */
+ unsigned int ipv4_rx_chksum:1;
+ /* 18: IPv6 RX Checksum enable */
+ unsigned int ipv6_rx_chksum:1;
+ /* 19: Remove Rx VLAN tag */
+ unsigned int rx_tag_remove:1;
+ /* 20 */
+ unsigned int rgmm_edge:1;
+ /* 21 */
+ unsigned int rxc_inv:1;
+ /* 22 */
+ unsigned int ipv6_exthdr_order:1;
+ /* 23 */
+ unsigned int rx_err_detect:1;
+ /* 24 */
+ unsigned int port0_chk_hwq:1;
+ /* 25 */
+ unsigned int port1_chk_hwq:1;
+ /* 26 */
+ unsigned int port0_chk_toeq:1;
+ /* 27 */
+ unsigned int port1_chk_toeq:1;
+ /* 28 */
+ unsigned int port0_chk_classq:1;
+ /* 29 */
+ unsigned int port1_chk_classq:1;
+ /* 30, 31 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+#define CONFIG0_TX_RX_DISABLE (BIT(1) | BIT(0))
+#define CONFIG0_RX_CHKSUM (BIT(18) | BIT(17))
+#define CONFIG0_FLOW_RX BIT(14)
+#define CONFIG0_FLOW_TX BIT(15)
+#define CONFIG0_FLOW_TX_RX (BIT(14) | BIT(15))
+#define CONFIG0_FLOW_CTL (BIT(14) | BIT(15))
+
+#define CONFIG0_MAXLEN_SHIFT 8
+#define CONFIG0_MAXLEN_MASK (7 << CONFIG0_MAXLEN_SHIFT)
+#define CONFIG0_MAXLEN_1536 0
+#define CONFIG0_MAXLEN_1518 1
+#define CONFIG0_MAXLEN_1522 2
+#define CONFIG0_MAXLEN_1542 3
+#define CONFIG0_MAXLEN_9k 4 /* 9212 */
+#define CONFIG0_MAXLEN_10k 5 /* 10236 */
+#define CONFIG0_MAXLEN_1518__6 6
+#define CONFIG0_MAXLEN_1518__7 7
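+
+/* Example (illustrative): gmac_change_mtu() converts the requested
+ * MTU into one of these 3-bit encodings and writes it through
+ * CONFIG0_MAXLEN_MASK; a 1500-byte MTU with a VLAN tag fits the
+ * 1522-byte frame limit, i.e. CONFIG0_MAXLEN_1522.
+ */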
+
+/* GMAC Configuration 1
+ * GMAC0 Offset 0xA01C
+ * GMAC1 Offset 0xE01C
+ */
+union gmac_config1 {
+ unsigned int bits32;
+ struct bit1_001c {
+ /* Flow control set threshold */
+ unsigned int set_threshold:8;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:8;
+ unsigned int reserved:16;
+ } bits;
+};
+
+#define GMAC_FLOWCTRL_SET_MAX 32
+#define GMAC_FLOWCTRL_SET_MIN 0
+#define GMAC_FLOWCTRL_RELEASE_MAX 32
+#define GMAC_FLOWCTRL_RELEASE_MIN 0
+
+/* GMAC Configuration 2
+ * GMAC0 Offset 0xA020
+ * GMAC1 Offset 0xE020
+ */
+union gmac_config2 {
+ unsigned int bits32;
+ struct bit1_0020 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC Configuration 3
+ * GMAC0 Offset 0xA024
+ * GMAC1 Offset 0xE024
+ */
+union gmac_config3 {
+ unsigned int bits32;
+ struct bit1_0024 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC STATUS
+ * GMAC0 Offset 0xA02C
+ * GMAC1 Offset 0xE02C
+ */
+union gmac_status {
+ unsigned int bits32;
+ struct bit1_002c {
+ /* Link status */
+ unsigned int link:1;
+		/* Link speed (00->2.5M 01->25M 10->125M) */
+ unsigned int speed:2;
+ /* Duplex mode */
+ unsigned int duplex:1;
+ unsigned int reserved_1:1;
+ /* PHY interface type */
+ unsigned int mii_rmii:2;
+ unsigned int reserved_2:25;
+ } bits;
+};
+
+#define GMAC_SPEED_10 0
+#define GMAC_SPEED_100 1
+#define GMAC_SPEED_1000 2
+
+#define GMAC_PHY_MII 0
+#define GMAC_PHY_GMII 1
+#define GMAC_PHY_RGMII_100_10 2
+#define GMAC_PHY_RGMII_1000 3
+
+/* Queue Header
+ * (1) TOE Queue Header
+ * (2) Non-TOE Queue Header
+ * (3) Interrupt Queue Header
+ *
+ * memory Layout
+ * TOE Queue Header
+ * 0x60003000 +---------------------------+ 0x0000
+ * | TOE Queue 0 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0020
+ * | TOE Queue 1 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0040
+ * | ...... |
+ * | |
+ * +---------------------------+
+ *
+ * Non TOE Queue Header
+ * 0x60002000 +---------------------------+ 0x0000
+ * | Default Queue 0 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0008
+ * | Default Queue 1 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0010
+ * | Classification Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Classification Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (n * 8 + 0x10)
+ * | ... |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (13 * 8 + 0x10)
+ * | Classification Queue 13 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x80
+ * | Interrupt Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 2 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 3 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ *
+ */
+#define TOE_QUEUE_HDR_ADDR(n)	(TOE_TOE_QUE_HDR_BASE + (n) * 32)
+#define TOE_Q_HDR_AREA_END (TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX + 1))
+#define TOE_DEFAULT_Q_HDR_BASE(x) (TOE_NONTOE_QUE_HDR_BASE + 0x08 * (x))
+#define TOE_CLASS_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x10)
+#define TOE_INTR_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x80)
+#define INTERRUPT_QUEUE_HDR_ADDR(n)	(TOE_INTR_Q_HDR_BASE + (n) * 8)
+#define NONTOE_Q_HDR_AREA_END (INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX + 1))
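+
+/* Address example (illustrative only): TOE_QUEUE_HDR_ADDR(1) is
+ * 0x3000 + 1 * 32 == 0x3020, matching the "TOE Queue 1 Header"
+ * offset in the layout above; INTERRUPT_QUEUE_HDR_ADDR(2) is
+ * 0x2080 + 2 * 8 == 0x2090.
+ */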
+
+/* NONTOE Queue Header Word 0 */
+union nontoe_qhdr0 {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define NONTOE_QHDR0_BASE_MASK (~0x0f)
+
+/* NONTOE Queue Header Word 1 */
+union nontoe_qhdr1 {
+ unsigned int bits32;
+ struct bit_nonqhdr1 {
+ /* bit 15:0 */
+ unsigned int rptr:16;
+ /* bit 31:16 */
+ unsigned int wptr:16;
+ } bits;
+};
+
+/* Non-TOE Queue Header */
+struct nontoe_qhdr {
+ union nontoe_qhdr0 word0;
+ union nontoe_qhdr1 word1;
+};
+
+#endif /* _GEMINI_ETHERNET_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 02dd5246dfae..1a49297224ed 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
- int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+ int num_entries = ARRAY_SIZE(cmd_priv_map);
u32 cmd_privileges = adapter->cmd_privileges;
for (i = 0; i < num_entries; i++)
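The hunk above replaces an open-coded sizeof division with ARRAY_SIZE(). A minimal sketch of the idiom; the kernel's macro additionally rejects pointer arguments via __must_be_array(), which this standalone version omits, and the map entries here are illustrative:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct be_cmd_priv_map { int opcode, subsystem; };

static const struct be_cmd_priv_map cmd_priv_map[] = {
	{ 1, 0 }, { 2, 0 }, { 3, 1 },	/* illustrative entries */
};

int main(void)
{
	/* stays correct if entries are added or the element type changes */
	printf("%zu entries\n", ARRAY_SIZE(cmd_priv_map));
	return 0;
}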
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e180657a02ef..c36c81959198 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,8 +4634,7 @@ int be_update_queues(struct be_adapter *adapter)
be_schedule_worker(adapter);
- /*
- * The IF was destroyed and re-created. We need to clear
+ /* The IF was destroyed and re-created. We need to clear
* all promiscuous flags valid for the destroyed IF.
* Without this promisc mode is not restored during
* be_open() because the driver thinks that it is
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5385074b3b7d..e7381f8ef89d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -20,7 +20,8 @@
#include <linux/timecounter.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -195,7 +196,7 @@
* Evidently, ARM SoCs have the FEC block generated in a
* little endian mode so adjust endianness accordingly.
*/
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#define fec32_to_cpu le32_to_cpu
#define fec16_to_cpu le16_to_cpu
#define cpu_to_fec32 cpu_to_le32
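The aliasing above is how the driver keeps a single set of accessors across byte orders: ColdFire parts expose the FEC registers big-endian, while ARM and, with this patch, ARM64 SoCs generate the block little-endian. A standalone sketch of the pattern; the le32_to_cpu/be32_to_cpu stand-ins assume a little-endian host, and FEC_LE_BLOCK is a made-up switch standing in for the CONFIG_ARM/CONFIG_ARM64 test:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the kernel helpers, valid on a little-endian host */
#define le32_to_cpu(x)	(x)
#define be32_to_cpu(x)	__builtin_bswap32(x)

#ifdef FEC_LE_BLOCK	/* ARM/ARM64 per the patch */
#define fec32_to_cpu	le32_to_cpu
#else			/* ColdFire: big-endian register block */
#define fec32_to_cpu	be32_to_cpu
#endif

static uint32_t fec_read(const volatile uint32_t *reg)
{
	/* one accessor name, correct byte order on every supported SoC */
	return fec32_to_cpu(*reg);
}

int main(void)
{
	volatile uint32_t fake_reg = 0x12345678;

	printf("0x%08x\n", fec_read(&fake_reg));
	return 0;
}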
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a74300a4459c..7a7f3a42b2aa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -195,7 +195,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -1868,6 +1869,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ref);
if (ret)
goto failed_clk_ref;
+
+ phy_reset_after_clk_enable(ndev->phydev);
} else {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_enet_out);
@@ -2107,7 +2110,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2840,6 +2844,7 @@ fec_enet_open(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ bool reset_again;
ret = pm_runtime_get_sync(&fep->pdev->dev);
if (ret < 0)
@@ -2850,6 +2855,17 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto clk_enable;
+ /* During the first fec_enet_open() call the PHY isn't probed yet at
+ * this point, so the phy_reset_after_clk_enable() call within
+ * fec_enet_clk_enable() fails. As we need this reset to be sure the
+ * PHY is working correctly, check whether we need to reset again
+ * later, once the PHY has been probed.
+ */
+ if (ndev->phydev && ndev->phydev->drv)
+ reset_again = false;
+ else
+ reset_again = true;
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -2866,6 +2882,12 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto err_enet_mii_probe;
+ /* Call phy_reset_after_clk_enable() again if it failed earlier in
+ * fec_enet_clk_enable() because the PHY wasn't probed yet.
+ */
+ if (reset_again)
+ phy_reset_after_clk_enable(ndev->phydev);
+
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
@@ -3119,7 +3141,7 @@ static int fec_enet_init(struct net_device *ndev)
unsigned dsize_log2 = __fls(dsize);
WARN_ON(dsize != (1 << dsize_log2));
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7f837006bb6a..3bdeb295514b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2932,7 +2932,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
struct sk_buff *skb, bool first)
{
- unsigned int size = lstatus & BD_LENGTH_MASK;
+ int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
@@ -2947,11 +2947,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
if (last)
size -= skb->len;
- /* in case the last fragment consisted only of the FCS */
+ /* Add the last fragment if it contains something other than
+ * the FCS, otherwise drop it and trim off any part of the FCS
+ * that was already received.
+ */
if (size > 0)
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rxb->page_offset + RXBUF_ALIGNMENT,
size, GFAR_RXB_TRUESIZE);
+ else if (size < 0)
+ pskb_trim(skb, skb->len + size);
}
/* try reuse page */
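The switch from unsigned int to int for size is what gives the new branch meaning: for the last buffer, size is the frame length reported by the descriptor minus the bytes already in the skb, so a positive value is remaining payload, zero means the buffer held only FCS, and a negative value means FCS bytes already spilled into the previous buffer and must be trimmed. A worked-number sketch of the negative case (all values illustrative):

#include <assert.h>

int main(void)
{
	int skb_len = 1518;		/* bytes already queued in the skb */
	int size = 1516 - skb_len;	/* descriptor frame len - skb->len */

	/* size = -2: two FCS bytes spilled into the previous buffer,
	 * so pskb_trim(skb, skb->len + size) cuts the skb back to 1516
	 */
	assert(size == -2);
	assert(skb_len + size == 1516);
	return 0;
}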
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 30000b6aa7b8..8bcf470ff5f3 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -94,15 +94,6 @@ config HNS3_HCLGE
compatibility layer. The engine would be used in Hisilicon hip08 family of
SoCs and further upcoming SoCs.
-config HNS3_ENET
- tristate "Hisilicon HNS3 Ethernet Device Support"
- depends on 64BIT && PCI
- depends on HNS3 && HNS3_HCLGE
- ---help---
- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
- family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
- devices and their associated operations.
-
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
@@ -112,4 +103,23 @@ config HNS3_DCB
If unsure, say N.
+config HNS3_HCLGEVF
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
+ depends on HNS3
+ depends on HNS3_HCLGE
+ ---help---
+ This selects the HNS3 VF drivers network acceleration engine & its hardware
+ compatibility layer. The engine would be used in Hisilicon hip08 family of
+ SoCs and further upcoming SoCs.
+
+config HNS3_ENET
+ tristate "Hisilicon HNS3 Ethernet Device Support"
+ depends on 64BIT && PCI
+ depends on HNS3
+ ---help---
+ This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
+ family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
+ devices and their associated operations.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8b5cdf490850..cac86e9ae0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
- if (!mac_cb || !mac_cb->cpld_ctrl)
+ if (!mac_cb)
return 0;
return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 408b63faf9a8..acf29633ec79 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -18,6 +18,7 @@ enum _dsm_op_index {
HNS_OP_LED_SET_FUNC = 0x3,
HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+ HNS_OP_LOCATE_LED_SET_FUNC = 0x6,
};
enum _dsm_rst_type {
@@ -43,12 +44,17 @@ static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
{
- u32 ret;
-
- if (dsaf_dev->sub_ctrl)
- ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
- else
+ u32 ret = 0;
+ int err;
+
+ if (dsaf_dev->sub_ctrl) {
+ err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret);
+ if (err)
+ dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n",
+ err);
+ } else {
ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+ }
return ret;
}
@@ -81,6 +87,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
ACPI_FREE(obj);
}
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+ u8 op_type, u32 locate,
+ u32 port)
+{
+ union acpi_object obj_args[2], argv4;
+ union acpi_object *obj;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = locate;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+ locate, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
{
@@ -160,15 +193,23 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return 0;
+
switch (status) {
case HNAE_LED_ACTIVE:
- mac_cb->cpld_led_value =
- dsaf_read_syscon(mac_cb->cpld_ctrl,
- mac_cb->cpld_ctrl_reg);
- dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
- CPLD_LED_ON_VALUE);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ &val);
+ if (ret)
+ return ret;
+
+ dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE);
dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
- mac_cb->cpld_led_value);
+ val);
+ mac_cb->cpld_led_value = val;
break;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
@@ -184,6 +225,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
return 0;
}
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_ON_VALUE,
+ mac_cb->mac_id);
+ break;
+ case HNAE_LED_INACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_DEFAULT_VALUE,
+ mac_cb->mac_id);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#define RESET_REQ_OR_DREQ 1
static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
@@ -505,12 +570,19 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
+ u32 val = 0;
+ int ret;
+
if (!mac_cb->cpld_ctrl)
return -ENODEV;
- *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
- + MAC_SFP_PORT_OFFSET);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET,
+ &val);
+ if (ret)
+ return ret;
+ *sfp_prsnt = !val;
return 0;
}
@@ -560,7 +632,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
- int sfp_prsnt;
+ int sfp_prsnt = 0;
int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
if (!mac_cb->phy_dev) {
@@ -572,7 +644,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
}
if (mac_cb->serdes_ctrl) {
- u32 origin;
+ u32 origin = 0;
if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
#define HILINK_ACCESS_SEL_CFG 0x40008
@@ -589,7 +661,10 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
HILINK_ACCESS_SEL_CFG, 3);
}
- origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+ ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset,
+ &origin);
+ if (ret)
+ return ret;
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
@@ -660,7 +735,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
misc_op->cpld_set_led = hns_cpld_set_led_acpi;
misc_op->cpld_reset_led = cpld_led_reset_acpi;
- misc_op->cpld_set_led_id = cpld_set_led_id;
+ misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
misc_op->dsaf_reset = hns_dsaf_rst_acpi;
misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 46a52d9bb196..886cbbf25761 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1034,12 +1034,9 @@ static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
regmap_write(base, reg, value);
}
-static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
{
- unsigned int val;
-
- regmap_read(base, reg, &val);
- return val;
+ return regmap_read(base, reg, val);
}
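The signature change above turns dsaf_read_syscon() into the usual out-parameter pattern: the return value carries regmap_read()'s error code and the register contents come back through *val, rather than returning an invented value on failure. A standalone sketch of the calling convention the patched callers follow (fake_regmap_read() is a stand-in, not a kernel API):

#include <stdio.h>

/* stand-in for regmap_read(): 0 on success, negative errno on failure */
static int fake_regmap_read(unsigned int reg, unsigned int *val)
{
	*val = 0xdead0000u | reg;
	return 0;
}

static int dsaf_read_syscon_like(unsigned int reg, unsigned int *val)
{
	return fake_regmap_read(reg, val);
}

int main(void)
{
	unsigned int val = 0;	/* callers pre-initialize, as the patch does */
	int err = dsaf_read_syscon_like(0x1c, &val);

	if (err)
		return 1;
	printf("reg 0x1c = 0x%x\n", val);
	return 0;
}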
#define dsaf_read_dev(a, reg) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index a9349e1f3e51..002534f12b66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -1,7 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
obj-$(CONFIG_HNS3) += hns3pf/
+obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 000000000000..3e9203ea42a6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define HCLGE_MBX_VF_MSG_DATA_NUM 16
+
+enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
+ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
+ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
+ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */
+ HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unmap ring-to-vector */
+ HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */
+ HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
+ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
+ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
+ HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
+ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
+ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
+ HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
+ HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
+ HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
+ HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
+ HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */
+ HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */
+ HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */
+ HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */
+ HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
+ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
+ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE, /* remove a UC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+ HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
+ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
+ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE 16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+
+struct hclgevf_mbx_resp_status {
+ struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+ u32 origin_mbx_msg;
+ bool received_resp;
+ int resp_status;
+ u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+ u8 rsv;
+ u8 mbx_src_vfid; /* Auto filled by IMP */
+ u8 rsv1[2];
+ u8 msg_len;
+ u8 rsv2[3];
+ u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+};
+
+struct hclge_mbx_pf_to_vf_cmd {
+ u8 dest_vfid;
+ u8 rsv[3];
+ u8 msg_len;
+ u8 rsv1[3];
+ u16 msg[8];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#endif
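The ring-pointer macro above advances next_to_use circularly over desc_num descriptors using modular arithmetic. A standalone sketch with the wraparound worked out (the struct is a stand-in for the real CRQ ring):

#include <assert.h>

struct fake_crq { int next_to_use; int desc_num; };

#define hclge_mbx_ring_ptr_move_crq(crq) \
	((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num)

int main(void)
{
	struct fake_crq q = { .next_to_use = 3, .desc_num = 4 };

	hclge_mbx_ring_ptr_move_crq(&q);	/* (3 + 1) % 4 wraps to 0 */
	assert(q.next_to_use == 0);
	return 0;
}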
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 5bcb2238acb2..02145f2de820 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret = 0, lock_acquired;
+
+ /* We can get deadlocked if SRIOV is being enabled in the context of
+ * probe and probe gets called again in the same context. This can
+ * happen when pci_enable_sriov() is called to create VFs from the PF's
+ * probe context. Therefore, for simplicity, uniformly defer further
+ * probing in all cases where we detect contention.
+ */
+ lock_acquired = mutex_trylock(&hnae3_common_lock);
+ if (!lock_acquired)
+ return -EPROBE_DEFER;
- mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
/* Check if there are matched ae_algo */
@@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+ ret = -EOPNOTSUPP;
goto out_err;
}
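The register path above trades an unconditional mutex_lock() for mutex_trylock() plus -EPROBE_DEFER, so a probe re-entered from pci_enable_sriov() backs off and is retried by the driver core instead of deadlocking. A userspace sketch of the same shape, with a pthread mutex standing in for hnae3_common_lock (build with -pthread):

#include <pthread.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel value; "retry this probe later" */

static pthread_mutex_t common_lock = PTHREAD_MUTEX_INITIALIZER;

static int register_ae_dev(void)
{
	/* contended, e.g. re-entered from pci_enable_sriov() while the
	 * outer probe still holds the lock: defer instead of deadlocking
	 */
	if (pthread_mutex_trylock(&common_lock) != 0)
		return -EPROBE_DEFER;

	/* ... match ae_algo, init the client ... */

	pthread_mutex_unlock(&common_lock);
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&common_lock);	/* simulate the outer probe */
	printf("nested probe -> %d\n", register_ae_dev());	/* -517 */
	pthread_mutex_unlock(&common_lock);
	return 0;
}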
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 67c59e1039f2..fd06bc78c58e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -133,11 +133,16 @@ struct hnae3_vector_info {
#define HNAE3_RING_TYPE_B 0
#define HNAE3_RING_TYPE_TX 0
#define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
u32 flag;
+ u32 int_gl_idx;
};
#define HNAE3_IS_TX_RING(node) \
@@ -274,10 +279,14 @@ struct hnae3_ae_dev {
* Get firmware version
* get_mdix_mode()
* Get media type of phy
+ * enable_vlan_filter()
+ * Enable vlan filter
* set_vlan_filter()
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ * Enable/disable hardware strip vlan tag of packets received
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -347,7 +356,8 @@ struct hnae3_ae_ops {
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
- void (*get_regs)(struct hnae3_handle *handle, void *data);
+ void (*get_regs)(struct hnae3_handle *handle, u32 *version,
+ void *data);
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
@@ -380,12 +390,23 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
+ void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
+ int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct hnae3_handle *handle,
enum hnae3_reset_type reset);
+ void (*get_channels)(struct hnae3_handle *handle,
+ struct ethtool_channels *ch);
+ void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+ u16 *free_tqps, u16 *max_rss_size);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+ void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+ u32 *flowctrl_adv);
+ int (*set_led_id)(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status);
};
struct hnae3_dcb_ops {
@@ -435,6 +456,8 @@ struct hnae3_knic_private_info {
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
const struct hnae3_dcb_ops *dcb_ops;
+
+ u16 int_rl_setting;
};
struct hnae3_roce_private_info {
@@ -452,9 +475,10 @@ struct hnae3_unic_private_info {
struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
};
-#define HNAE3_SUPPORT_MAC_LOOPBACK 1
-#define HNAE3_SUPPORT_PHY_LOOPBACK 2
-#define HNAE3_SUPPORT_SERDES_LOOPBACK 4
+#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
+#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF BIT(3)
struct hnae3_handle {
struct hnae3_client *client;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 925619a7c50a..eb82700da7d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -93,7 +93,7 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle)
{
struct net_device *dev = handle->kinfo.netdev;
- if (!handle->kinfo.dcb_ops)
+ if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
return;
dev->dcbnl_ops = &hns3_dcbnl_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 59415090ff0f..601b6295d3f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -52,6 +52,8 @@ static const struct pci_device_id hns3_pci_tbl[] = {
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
/* required last entry */
{0, }
};
@@ -156,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
napi_disable(&tqp_vector->napi);
}
-static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 gl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value)
{
- /* this defines the configuration for GL (Interrupt Gap Limiter)
- * GL defines inter interrupt gap.
- * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
- */
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
-}
+ u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 rl_value)
-{
/* this defines the configuration for RL (Interrupt Rate Limiter).
* RL defines the rate of interrupts, i.e. the number of interrupts per second
* GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
*/
- writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+ if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+ !tqp_vector->rx_group.gl_adapt_enable)
+ /* According to the hardware, the range of rl_reg is
+ * 0-59 and the unit is 4.
+ */
+ rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+ writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hnae3_handle *h = priv->ae_handle;
+
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*/
- /* Default :enable interrupt coalesce */
- tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+ /* Default: enable self-adaptive interrupt coalescing and GL */
+ tqp_vector->tx_group.gl_adapt_enable = 1;
+ tqp_vector->rx_group.gl_adapt_enable = 1;
+
tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
- hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
- /* for now we are disabling Interrupt RL - we
- * will re-enable later
- */
- hns3_set_vector_coalesc_rl(tqp_vector, 0);
+ tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tqp_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ tqp_vector->rx_group.int_gl);
+
+ /* Default: disable RL */
+ h->kinfo.int_rl_setting = 0;
+ hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
@@ -245,6 +272,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
return 0;
out_start_err:
@@ -284,6 +313,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -721,6 +753,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+ struct hns3_enet_ring *tx_ring,
+ u32 *inner_vlan_flag,
+ u32 *out_vlan_flag,
+ u16 *inner_vtag,
+ u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->tqp->handle->kinfo.netdev->features &
+ NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off, and the stack
+ * sets the protocol to 802.1q, the driver just needs to
+ * set the protocol to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag;
+
+ vlan_tag = skb_vlan_tag_get(skb);
+ vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+ /* Based on hw strategy, use out_vtag in the two-layer tag case,
+ * and inner_vtag in the single-tag case.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
+ } else if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+ << HNS3_TX_VLAN_PRIO_SHIFT);
+ }
+
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+}
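On the priority insertion above: an 802.1Q TCI is 16 bits with the VID in bits 11:0 and the PCP in bits 15:13, which is where HNS3_TX_VLAN_PRIO_SHIFT = 13 comes from. A worked-number sketch:

#include <assert.h>

#define VLAN_PRIO_SHIFT	13	/* HNS3_TX_VLAN_PRIO_SHIFT in the patch */

int main(void)
{
	unsigned short vid = 100;	/* from skb_vlan_tag_get() */
	unsigned short prio = 5;	/* skb->priority & 0x7 */
	unsigned short tci = vid | (prio << VLAN_PRIO_SHIFT);

	/* 0x0064 | (5 << 13) = 0x0064 | 0xA000 = 0xA064 */
	assert(tci == 0xA064);
	return 0;
}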
+
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
enum hns_desc_type type)
@@ -731,6 +815,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 bdtp_fe_sc_vld_ra_ri = 0;
u32 type_cs_vlan_tso = 0;
struct sk_buff *skb;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
__be16 protocol;
@@ -754,15 +840,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
skb = (struct sk_buff *)priv;
paylen = skb->len;
+ ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec,
+ &inner_vtag, &out_vtag);
+ if (unlikely(ret))
+ return ret;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
protocol = skb->protocol;
- /* vlan packet*/
- if (protocol == htons(ETH_P_8021Q)) {
- protocol = vlan_get_protocol(skb);
- skb->protocol = protocol;
- }
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
return ret;
@@ -788,6 +875,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
}
/* move ring pointer to next.*/
@@ -1029,25 +1118,50 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ priv->ops.fill_desc = hns3_fill_desc_tso;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+ } else {
+ priv->ops.fill_desc = hns3_fill_desc;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ else
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+ h->ae_algo->ops->enable_hw_strip_rxvtag) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+ else
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+ if (ret)
+ return ret;
}
netdev->features = features;
return 0;
}
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
unsigned int start;
unsigned int idx;
@@ -1055,6 +1169,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
+ u64 tx_drop = 0;
+ u64 rx_drop = 0;
+
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ handle->ae_algo->ops->update_stats(handle, &netdev->stats);
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
@@ -1063,6 +1184,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
+ tx_drop += ring->stats.tx_busy;
+ tx_drop += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1071,6 +1194,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
+ rx_drop += ring->stats.non_vld_descs;
+ rx_drop += ring->stats.err_pkt_len;
+ rx_drop += ring->stats.l2_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1086,8 +1212,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = netdev->stats.rx_dropped;
- stats->tx_dropped = netdev->stats.tx_dropped;
+ stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
+ stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1317,6 +1443,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+ netdev->mtu = new_mtu;
+
/* if the netdev was running earlier, bring it up again */
if (if_running && hns3_nic_net_open(netdev))
ret = -EINVAL;
@@ -1476,6 +1604,8 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1490,6 +1620,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1503,11 +1634,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF))
+ netdev->hw_features |=
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2083,6 +2218,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
+ /* Based on hw strategy, the offloaded tag is stored in ot_vlan_tag
+ * in the two-layer tag case, and in vlan_tag in the single-tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(vlan_tag & VLAN_VID_MASK))
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2301,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
- u16 rx_int_gl, tx_int_gl;
- bool rx, tx;
-
- rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
- tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
- rx_int_gl = tqp_vector->rx_group.int_gl;
- tx_int_gl = tqp_vector->tx_group.int_gl;
- if (rx && tx) {
- if (rx_int_gl > tx_int_gl) {
- tqp_vector->tx_group.int_gl = rx_int_gl;
- tqp_vector->tx_group.flow_level =
- tqp_vector->rx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
- } else {
- tqp_vector->rx_group.int_gl = tx_int_gl;
- tqp_vector->rx_group.flow_level =
- tqp_vector->tx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
- }
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+ bool rx_update, tx_update;
+
+ if (rx_group->gl_adapt_enable) {
+ rx_update = hns3_get_new_int_gl(rx_group);
+ if (rx_update)
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ rx_group->int_gl);
+ }
+
+ if (tx_group->gl_adapt_enable) {
+ tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+ if (tx_update)
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tx_group->int_gl);
}
}
@@ -2380,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2395,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2406,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2419,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
cur_chain = chain;
rx_ring = rx_ring->next;
@@ -2507,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->rx_group.total_packets = 0;
tqp_vector->tx_group.total_bytes = 0;
tqp_vector->tx_group.total_packets = 0;
- hns3_vector_gl_rl_init(tqp_vector);
+ hns3_vector_gl_rl_init(tqp_vector, priv);
tqp_vector->handle = h;
ret = hns3_get_vector_ring_chain(tqp_vector,
@@ -2649,6 +2808,19 @@ err:
return ret;
}
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
+ }
+ devm_kfree(priv->dev, priv->ring_data);
+}
+
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
@@ -2785,8 +2957,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
h->ae_algo->ops->reset_queue(h, i);
hns3_fini_ring(priv->ring_data[i].ring);
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
}
+ devm_kfree(priv->dev, priv->ring_data);
return 0;
}
@@ -3142,8 +3318,8 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
switch (type) {
case HNAE3_UP_CLIENT:
- ret = hns3_reset_notify_up_enet(handle);
- break;
+ ret = hns3_reset_notify_up_enet(handle);
+ break;
case HNAE3_DOWN_CLIENT:
ret = hns3_reset_notify_down_enet(handle);
break;
@@ -3160,6 +3336,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 free_tqps, max_rss_size, max_tqps;
+
+ h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+ max_tqps = h->kinfo.num_tc * max_rss_size;
+
+ return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret;
+
+ ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+ if (ret)
+ return ret;
+
+ ret = hns3_get_ring_config(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret)
+ goto err_uninit_vector;
+
+ ret = hns3_init_all_ring(priv);
+ if (ret)
+ goto err_put_ring;
+
+ return 0;
+
+err_put_ring:
+ hns3_put_ring_config(priv);
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+ return (new_tqp_num / num_tc) * num_tc;
+}
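hns3_adjust_tqps_num() relies on truncating integer division: divide-then-multiply rounds the requested queue count down to a multiple of num_tc, so every traffic class ends up with the same number of queues. A worked sketch:

#include <assert.h>

static unsigned int adjust_tqps_num(unsigned char num_tc,
				    unsigned int new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;	/* truncate, then scale */
}

int main(void)
{
	assert(adjust_tqps_num(4, 10) == 8);	/* 10 rounds down to 8 */
	assert(adjust_tqps_num(4, 8) == 8);	/* multiples pass through */
	return 0;
}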
+
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ bool if_running = netif_running(netdev);
+ u32 new_tqp_num = ch->combined_count;
+ u16 org_tqp_num;
+ int ret;
+
+ if (ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+ new_tqp_num < kinfo->num_tc) {
+ dev_err(&netdev->dev,
+ "Change tqps fail, the tqp range is from %d to %d",
+ kinfo->num_tc,
+ hns3_get_max_available_channels(netdev));
+ return -EINVAL;
+ }
+
+ new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+ if (kinfo->num_tqps == new_tqp_num)
+ return 0;
+
+ if (if_running)
+ dev_close(netdev);
+
+ hns3_clear_all_ring(h);
+
+ ret = hns3_nic_uninit_vector_data(priv);
+ if (ret) {
+ dev_err(&netdev->dev,
+ "Unbind vector with tqp fail, nothing is changed");
+ goto open_netdev;
+ }
+
+ hns3_uninit_all_ring(priv);
+
+ org_tqp_num = h->kinfo.num_tqps;
+ ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+ if (ret) {
+ ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+ if (ret) {
+ /* If revert to old tqp failed, fatal error occurred */
+ dev_err(&netdev->dev,
+ "Revert to old tqp num fail, ret=%d", ret);
+ return ret;
+ }
+ dev_info(&netdev->dev,
+ "Change tqp num fail, Revert to old tqp num");
+ }
+
+open_netdev:
+ if (if_running)
+ dev_open(netdev);
+
+ return ret;
+}
+
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 8a9de759957b..213f501b30bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -451,10 +451,14 @@ enum hns3_link_mode_bits {
HNS3_LM_COUNT = 15
};
-#define HNS3_INT_GL_50K 0x000A
-#define HNS3_INT_GL_20K 0x0019
-#define HNS3_INT_GL_18K 0x001B
-#define HNS3_INT_GL_8K 0x003E
+#define HNS3_INT_GL_MAX 0x1FE0
+#define HNS3_INT_GL_50K 0x0014
+#define HNS3_INT_GL_20K 0x0032
+#define HNS3_INT_GL_18K 0x0036
+#define HNS3_INT_GL_8K 0x007C
+
+#define HNS3_INT_RL_MAX 0x00EC
+#define HNS3_INT_RL_ENABLE_MASK 0x40
struct hns3_enet_ring_group {
/* array of pointers to rings */
@@ -464,6 +468,7 @@ struct hns3_enet_ring_group {
u16 count;
enum hns3_flow_level_range flow_level;
u16 int_gl;
+ u8 gl_adapt_enable;
};
struct hns3_enet_tqp_vector {
@@ -594,7 +599,15 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
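The conversion macros above encode that the hardware GL registers count in 2 us units and the RL register in 4 us units; the matching round_down() helpers exist so the ethtool path can warn before the shift silently drops low bits. Worked numbers, using the constants from this hunk:

#include <assert.h>

#define hns3_gl_usec_to_reg(int_gl)	((int_gl) >> 1)	/* 2 us units */
#define hns3_rl_usec_to_reg(int_rl)	((int_rl) >> 2)	/* 4 us units */

int main(void)
{
	/* HNS3_INT_GL_50K = 0x0014 = 20 us -> GL register value 10 */
	assert(hns3_gl_usec_to_reg(0x0014) == 10);
	/* an odd GL such as 21 us silently loses the low bit (21 -> 10),
	 * hence the round-down-and-warn step in the ethtool handlers
	 */
	assert(hns3_gl_usec_to_reg(21) == 10);
	/* HNS3_INT_RL_MAX = 0x00EC = 236 us -> RL register value 59,
	 * matching the 0-59 range cited in hns3_set_vector_coalesce_rl()
	 */
	assert(hns3_rl_usec_to_reg(0x00EC) == 59);
	return 0;
}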
+
void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch);
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
@@ -604,6 +617,13 @@ int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a21470c72da3..b034c7f24eda 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
struct hns3_stats {
char stats_string[ETH_GSTRING_LEN];
- int stats_size;
int stats_offset;
};
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
- .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
- .stats_offset = offsetof(struct hns3_enet_ring, stats), \
-} \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+}
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("tx_pkts", tx_pkts),
- HNS3_TQP_STAT("tx_bytes", tx_bytes),
- HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
- HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", tx_pkts),
+ HNS3_TQP_STAT("bytes", tx_bytes),
+ HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("tx_wake", restart_queue),
HNS3_TQP_STAT("tx_busy", tx_busy),
};
@@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("rx_pkts", rx_pkts),
- HNS3_TQP_STAT("rx_bytes", rx_bytes),
- HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
- HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
- HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
- HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
- HNS3_TQP_STAT("rx_l2_err", l2_err),
- HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", rx_pkts),
+ HNS3_TQP_STAT("bytes", rx_bytes),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps)
+ u32 stat_count, u32 num_tqps, const char *prefix)
{
-#define MAX_PREFIX_SIZE (8 + 4)
+#define MAX_PREFIX_SIZE (6 + 4)
u32 size_left;
u32 i, j;
u32 n1;
@@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+ n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+ prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
/* get strings for Tx */
data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, rx_prefix);
return data;
}
@@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hns3_enet_ring *ring;
u8 *stat;
- u32 i;
+ int i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i].ring;
- for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
- for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
&param->rx_pause, &param->tx_pause);
}
+static int hns3_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_pauseparam)
+ return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+ param->rx_pause,
+ param->tx_pause);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 flowctrl_adv = 0;
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
if (!cmd->base.autoneg)
advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
+ advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
/* now, map driver link modes to ethtool link modes */
hns3_driv_to_eth_caps(supported_caps, cmd, false);
hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
/* 4.mdio_support */
cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+ /* 5. get flow control settings */
+ if (h->ae_algo->ops->get_flowctrl_adv)
+ h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+ if (flowctrl_adv & ADVERTISED_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+
+ if (flowctrl_adv & ADVERTISED_Asym_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
return 0;
}
@@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+ cmd->data = h->kinfo.rss_size;
break;
case ETHTOOL_GRXFH:
return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +878,241 @@ static int hns3_nway_reset(struct net_device *netdev)
return genphy_restart_aneg(phy);
}
+static void hns3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_channels)
+ h->ae_algo->ops->get_channels(h, ch);
+}
+
+static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *cmd)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ if (queue >= queue_num) {
+ netdev_err(netdev,
+ "Invalid queue value %d! Queue max id=%d\n",
+ queue, queue_num - 1);
+ return -EINVAL;
+ }
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+
+ cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
+ cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+
+ cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+ return 0;
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+}
+
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rx_gl, tx_gl;
+
+ if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+ if (rx_gl != cmd->rx_coalesce_usecs) {
+ netdev_info(netdev,
+ "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->rx_coalesce_usecs, rx_gl);
+ }
+
+ tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+ if (tx_gl != cmd->tx_coalesce_usecs) {
+ netdev_info(netdev,
+ "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->tx_coalesce_usecs, tx_gl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rl;
+
+ if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+ netdev_err(netdev,
+ "tx_usecs_high must be same as rx_usecs_high.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+ netdev_err(netdev,
+ "Invalid usecs_high value, usecs_high range is 0-%d\n",
+ HNS3_INT_RL_MAX);
+ return -EINVAL;
+ }
+
+ rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ if (rl != cmd->rx_coalesce_usecs_high) {
+ netdev_info(netdev,
+ "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ cmd->rx_coalesce_usecs_high, rl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ int ret;
+
+ ret = hns3_check_gl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check gl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_check_rl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check rl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ if (cmd->use_adaptive_tx_coalesce == 1 ||
+ cmd->use_adaptive_rx_coalesce == 1) {
+ netdev_info(netdev,
+ "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ cmd->use_adaptive_tx_coalesce,
+ cmd->use_adaptive_rx_coalesce);
+ }
+
+ return 0;
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ u32 queue)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
+ rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+
+ hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+
+ hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+ hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret;
+ int i;
+
+ ret = hns3_check_coalesce_para(netdev, cmd);
+ if (ret)
+ return ret;
+
+ h->kinfo.int_rl_setting =
+ hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+ for (i = 0; i < queue_num; i++)
+ hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+ return 0;
+}
+
+static int hns3_get_regs_len(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_regs_len(h);
+}
+
+static void hns3_get_regs(struct net_device *netdev,
+ struct ethtool_regs *cmd, void *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs)
+ return;
+
+ h->ae_algo->ops->get_regs(h, &cmd->version, data);
+}
+
+static int hns3_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_led_id(h, state);
+}
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .get_channels = hns3_get_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+};
+
static const struct ethtool_ops hns3_ethtool_ops = {
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
@@ -856,6 +1120,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
.get_pauseparam = hns3_get_pauseparam,
+ .set_pauseparam = hns3_set_pauseparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
@@ -868,9 +1133,21 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .set_phys_id = hns3_set_phys_id,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &hns3_ethtool_ops;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->flags & HNAE3_SUPPORT_VF)
+ netdev->ethtool_ops = &hns3vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &hns3_ethtool_ops;
}
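With get_coalesce/set_coalesce wired into both ops tables, the new paths are reachable through the standard ethtool coalesce interface, e.g. "ethtool -C eth0 adaptive-rx on rx-usecs 50 tx-usecs 50" to configure and "ethtool -c eth0" to read back (eth0 is a placeholder device name); the rx-usecs-high parameter maps to the shared RL setting validated in hns3_check_rl_coalesce_para() above.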
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index d2b20d01a58c..cb8ddd043476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
@@ -5,11 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
-
-obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
-
-hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ce5ed8845042..3fd10a6bec53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -102,6 +102,10 @@ enum hclge_opcode_type {
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
/* Device management command */
/* MAC commond */
@@ -111,6 +115,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314,
/* MACSEC command */
/* PFC/Pause CMD*/
@@ -180,6 +185,10 @@ enum hclge_opcode_type {
/* Promisuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+ /* Vlan offload command */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
/* Interrupts cmd */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
@@ -191,6 +200,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+ HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
/* Multicast linear table cmd */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
@@ -218,6 +228,9 @@ enum hclge_opcode_type {
/* Mailbox cmd */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -399,6 +412,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
#define HCLGE_CFG_DEFAULT_SPEED_S 16
#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S 24
+#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -549,8 +564,6 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
-#define HCLGE_MAC_MIN_MTU 64
-#define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -587,6 +600,37 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
+#define HCLGE_VLAN_MASK_EN_B 0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+ u8 rsv0[2];
+ u8 vlan_mask;
+ u8 rsv1;
+ u8 mac_mask[6];
+ u8 rsv2[14];
+};
+
+#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
+#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
+#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
+#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc
+
+struct hclge_mac_mgr_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ __le32 mac_addr_hi32;
+ __le16 mac_addr_lo16;
+ __le16 rsv1;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 sw_port_id_aware;
+ u8 rsv2;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rsv3[2];
+};
+
#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 0x7
@@ -658,6 +702,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
+#define HCLGE_ACCEPT_TAG_B 0
+#define HCLGE_ACCEPT_UNTAG_B 1
+#define HCLGE_PORT_INS_TAG1_EN_B 2
+#define HCLGE_PORT_INS_TAG2_EN_B 3
+#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+struct hclge_vport_vtag_tx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[2];
+ __le16 def_vlan_tag1;
+ __le16 def_vlan_tag2;
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B 0
+#define HCLGE_REM_TAG2_EN_B 1
+#define HCLGE_SHOW_TAG1_EN_B 2
+#define HCLGE_SHOW_TAG2_EN_B 3
+struct hclge_vport_vtag_rx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[6];
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+ __le16 ot_vlan_type;
+ __le16 in_vlan_type;
+ u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+ __le16 ot_fst_vlan_type;
+ __le16 ot_sec_vlan_type;
+ __le16 in_fst_vlan_type;
+ __le16 in_sec_vlan_type;
+ u8 rsv[16];
+};
+
struct hclge_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
@@ -726,6 +811,23 @@ struct hclge_reset_cmd {
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_LED_PORT_SPEED_STATE_S 0
+#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0)
+#define HCLGE_LED_ACTIVITY_STATE_S 0
+#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LINK_STATE_S 0
+#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LOCATE_STATE_S 0
+#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
+
+struct hclge_set_led_state_cmd {
+ u8 port_speed_led_config;
+ u8 link_led_config;
+ u8 activity_led_config;
+ u8 locate_led_config;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 59ed806a52c3..32bc6f68e297 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,10 +17,12 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
+#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
@@ -34,8 +36,10 @@
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable);
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_update_led_status(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -278,8 +282,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
{"mac_tx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+ {"mac_tx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
{"mac_tx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
{"mac_tx_65_127_oct_pkt_num",
@@ -292,8 +296,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
{"mac_tx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
{"mac_rx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
{"mac_rx_total_oct_num",
@@ -314,8 +334,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
{"mac_rx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+ {"mac_rx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
{"mac_rx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
{"mac_rx_65_127_oct_pkt_num",
@@ -328,33 +348,59 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
{"mac_rx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
-
- {"mac_trans_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
- {"mac_trans_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
- {"mac_trans_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
- {"mac_trans_err_all_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
- {"mac_trans_from_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
- {"mac_trans_from_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
- {"mac_rcv_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
- {"mac_rcv_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
- {"mac_rcv_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
- {"mac_rcv_fcs_err_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
- {"mac_rcv_send_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
- {"mac_rcv_send_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+ {"mac_rx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+ {"mac_tx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
+};
+
+static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+ {
+ .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
+ .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+ .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
+ .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .i_port_bitmap = 0x1,
+ },
};
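For reference, the single hclge_mgr_table entry above encodes the LLDP destination: mac_addr_hi32 = htonl(0x0180C200) plus mac_addr_lo16 = htons(0x000E) together spell the multicast address 01:80:c2:00:00:0e, and HCLGE_MAC_ETHERTYPE_LLDP (0x88cc) is the LLDP EtherType, so the management table traps LLDP frames on port 0 (i_port_bitmap = 0x1).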
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -460,9 +506,41 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
return 0;
}
+static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
+{
+ struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
+ struct hclge_desc desc;
+ __le64 *desc_data;
+ int ret;
+
+ /* For fiber ports the total rx/tx packet statistics need to be
+ * queried; they are used to verify data transfer.
+ */
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
+ if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get MAC total pkt stats fail, ret = %d\n", ret);
+
+ return ret;
+ }
+
+ desc_data = (__le64 *)(&desc.data[0]);
+ mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
+ mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
+
+ return 0;
+}
+
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
-#define HCLGE_MAC_CMD_NUM 17
+#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -524,7 +602,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -544,7 +622,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
return 0;
@@ -586,7 +664,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -594,7 +672,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -642,23 +720,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->rx_length_errors =
hw_stats->mac_stats.mac_rx_undersize_pkt_num;
net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -698,6 +775,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -723,6 +803,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
status);
hclge_update_netstat(hw_stats, net_stats);
+
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -981,6 +1063,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
+
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1058,7 +1144,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
- hdev->rss_size_max = 1;
+ hdev->rss_size_max = cfg.rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
@@ -1095,10 +1181,7 @@ static int hclge_configure(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++)
hnae_set_bit(hdev->hw_tc_map, i, 1);
- if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
- else
- hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
return ret;
}
@@ -2132,28 +2215,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return 0;
}
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
- struct hclge_mac *mac = &hdev->hw.mac;
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "autoneg result query cmd failed %d.\n", ret);
- return ret;
- }
-
- mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
- return 0;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -2189,15 +2250,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
- hclge_query_autoneg_result(hdev);
+ if (phydev)
+ return phydev->autoneg;
return hdev->hw.mac.autoneg;
}
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+ bool mask_vlan,
+ u8 *mac_mask)
+{
+ struct hclge_mac_vlan_mask_entry_cmd *req;
+ struct hclge_desc desc;
+ int status;
+
+ req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+ hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
+ ether_addr_copy(req->mac_mask, mac_mask);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_mac_init(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
+ u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int mtu;
int ret;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2223,7 +2314,45 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
- return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set default mac_vlan_mask fail ret=%d\n", ret);
+ return ret;
+ }
+
+ if (netdev)
+ mtu = netdev->mtu;
+ else
+ mtu = ETH_DATA_LEN;
+
+ ret = hclge_set_mtu(handle, mtu);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->rst_service_task);
}
static void hclge_task_schedule(struct hclge_dev *hdev)
@@ -2350,6 +2479,7 @@ static void hclge_service_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw_stats.stats_timer++;
hclge_task_schedule(hdev);
}
@@ -2362,6 +2492,64 @@ static void hclge_service_complete(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+ u32 rst_src_reg;
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+ /* Assumption: if reset and mailbox events happen to be reported
+ * together, we only process the reset event in this pass and defer
+ * the mailbox events. Since the RX CMDQ event has not been cleared
+ * this time, the H/W will raise another interrupt just for the
+ * mailbox.
+ */
+
+ /* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
+ return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+{
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+ break;
+ }
+}
+
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
writel(enable ? 1 : 0, vector->addr);
@@ -2370,10 +2558,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 event_cause;
+ u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
- if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ event_cause = hclge_check_event_cause(hdev, &clearval);
+
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
+ switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_reset_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ /* If we are here then either:
+ * 1. we are not handling any mbx task and we are not scheduled
+ *    to handle one, or
+ * 2. we are handling an mbx task but nothing more is scheduled.
+ * In both cases we should schedule the mbx task, as this
+ * interrupt reports more mbx messages.
+ */
+ hclge_mbx_task_schedule(hdev);
+ break;
+
+ default:
+ dev_dbg(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
+ break;
+ }
+
+ /* clear the source of the interrupt */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2404,9 +2620,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
- ret = devm_request_irq(&hdev->pdev->dev,
- hdev->misc_vector.vector_irq,
- hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+ /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
+ ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+ 0, "hclge_misc", hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -2416,6 +2632,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
return ret;
}
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclge_free_vector(hdev, 0);
+}
+
static int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
@@ -2471,12 +2693,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
cnt++;
}
- /* must clear reset status register to
- * prevent driver detect reset interrupt again
- */
- reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
if (cnt >= HCLGE_RESET_WAIT_CNT) {
dev_warn(&hdev->pdev->dev,
"Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2721,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
return ret;
}
-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
u32 val;
- switch (type) {
+ switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2742,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
hclge_func_reset_cmd(hdev, 0);
+ /* schedule again to check later */
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", type);
+ "Unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+ unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
- u32 rst_reg_val;
- rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_GLOBAL_RESET, addr))
rst_level = HNAE3_GLOBAL_RESET;
- else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_CORE_RESET, addr))
rst_level = HNAE3_CORE_RESET;
- else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_IMP_RESET, addr))
rst_level = HNAE3_IMP_RESET;
+ else if (test_bit(HNAE3_FUNC_RESET, addr))
+ rst_level = HNAE3_FUNC_RESET;
+
+ /* now, clear all other resets */
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
return rst_level;
}
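To illustrate the priority-then-drain contract of hclge_get_reset_level() (sketch only, using the same HNAE3_* bits and bitops as the function itself):

    /* With a global and a function reset pending simultaneously, the
     * higher-priority level wins and the whole mask is drained, so a
     * second call returns HNAE3_NONE_RESET.
     */
    enum hnae3_reset_type rst_level;
    unsigned long pending = 0;

    set_bit(HNAE3_GLOBAL_RESET, &pending);
    set_bit(HNAE3_FUNC_RESET, &pending);

    rst_level = hclge_get_reset_level(hdev, &pending); /* HNAE3_GLOBAL_RESET */
    rst_level = hclge_get_reset_level(hdev, &pending); /* HNAE3_NONE_RESET */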
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ /* perform reset of the stack & ae device for a client */
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ if (!hclge_reset_wait(hdev)) {
+ rtnl_lock();
+ hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ hclge_reset_ae_dev(hdev->ae_dev);
+ hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ rtnl_unlock();
+ } else {
+ /* schedule again to check pending resets later */
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ }
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
static void hclge_reset_event(struct hnae3_handle *handle,
enum hnae3_reset_type reset)
{
@@ -2563,14 +2811,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
case HNAE3_FUNC_RESET:
case HNAE3_CORE_RESET:
case HNAE3_GLOBAL_RESET:
- if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
- dev_err(&hdev->pdev->dev, "Already in reset state");
- return;
- }
- hdev->reset_type = reset;
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->service_task);
+ /* request reset & schedule reset task */
+ set_bit(reset, &hdev->reset_request);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,49 +2823,55 @@ static void hclge_reset_event(struct hnae3_handle *handle,
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
- bool do_reset;
+ /* Check whether there is any ongoing reset in the hardware; this
+ * status can be read from reset_pending. If there is, we must wait
+ * for the hardware to complete the reset:
+ * a. if we can determine within a reasonable time that the hardware
+ *    has fully reset, proceed with the driver and client reset;
+ * b. otherwise, reschedule now and come back to check this status
+ *    later.
+ */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_reset(hdev);
- do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+ /* check if we got any *new* reset requests to be honored */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_do_reset(hdev);
- /* Reset is detected by interrupt */
- if (hdev->reset_type == HNAE3_NONE_RESET)
- hdev->reset_type = hclge_detected_reset_event(hdev);
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
- if (hdev->reset_type == HNAE3_NONE_RESET)
+static void hclge_reset_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, rst_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- case HNAE3_CORE_RESET:
- case HNAE3_GLOBAL_RESET:
- case HNAE3_IMP_RESET:
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- if (do_reset)
- hclge_do_reset(hdev, hdev->reset_type);
- else
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+ hclge_reset_subtask(hdev);
- if (!hclge_reset_wait(hdev)) {
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- }
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- break;
- default:
- dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
- hdev->reset_type);
- break;
- }
- hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct work_struct *work)
{
- hclge_reset_subtask(hdev);
- hclge_enable_vector(&hdev->misc_vector, true);
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_service_task(struct work_struct *work)
@@ -2630,10 +2879,20 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
- hclge_misc_irq_service_task(hdev);
+ /* The total rx/tx packet statistics should be updated once per
+ * second. Either hclge_update_stats_for_all() or
+ * hclge_mac_get_traffic_stats() can do this.
+ */
+ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+ hclge_update_stats_for_all(hdev);
+ hdev->hw_stats.stats_timer = 0;
+ } else {
+ hclge_mac_get_traffic_stats(hdev);
+ }
+
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_stats_for_all(hdev);
+ hclge_update_led_status(hdev);
hclge_service_complete(hdev);
}
@@ -3174,49 +3433,53 @@ err:
return ret;
}
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
- struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- int ret;
+ struct hclge_ctrl_vector_chain_cmd *req
+ = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ enum hclge_cmd_status status;
+ enum hclge_opcode_type op;
+ u16 tqp_type_and_id;
int i;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+ hclge_cmd_setup_basic_desc(&desc, op, false);
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
+ hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
+ req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+ req->vfid = vport->vport_id;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
"Map TQP fail, status is %d.\n",
- ret);
- return ret;
+ status);
+ return -EIO;
}
i = 0;
hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_ADD_RING_TO_VECTOR,
+ op,
false);
req->int_vector_id = vector_id;
}
@@ -3224,21 +3487,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
if (i > 0) {
req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ req->vfid = vport->vport_id;
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
- "Map TQP fail, status is %d.\n", ret);
- return ret;
+ "Map TQP fail, status is %d.\n", status);
+ return -EIO;
}
}
return 0;
}
-static int hclge_map_handle_ring_to_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -3247,24 +3510,20 @@ static int hclge_map_handle_ring_to_vector(
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
+ "Get vector index fail. vector_id =%d\n", vector_id);
return vector_id;
}
- return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+ return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_from_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
- struct hnae3_ring_chain_node *node;
- struct hclge_desc desc;
- int i, vector_id;
- int ret;
+ int vector_id, ret;
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
@@ -3273,54 +3532,17 @@ static int hclge_unmap_ring_from_vector(
return vector_id;
}
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
- req->int_vector_id = vector_id;
-
- i = 0;
- for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
- if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
- req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n",
- ret);
- return ret;
- }
- i = 0;
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_DEL_RING_TO_VECTOR,
- false);
- req->int_vector_id = vector_id;
- }
+ ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
}
- if (i > 0) {
- req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n", ret);
- return ret;
- }
- }
+ /* Free this MSIX or MSI vector */
+ hclge_free_vector(hdev, vector_id);
return 0;
}
@@ -4077,6 +4299,91 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
return status;
}
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+ u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
+#define HCLGE_ETHERTYPE_ALREADY_ADD 1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
+
+ int return_status;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ switch (resp_code) {
+ case HCLGE_ETHERTYPE_SUCCESS_ADD:
+ case HCLGE_ETHERTYPE_ALREADY_ADD:
+ return_status = 0;
+ break;
+ case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for manager table overflow.\n");
+ return_status = -EIO;
+ break;
+ case HCLGE_ETHERTYPE_KEY_CONFLICT:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for key conflict.\n");
+ return_status = -EIO;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for undefined, code=%d.\n",
+ resp_code);
+ return_status = -EIO;
+ }
+
+ return return_status;
+}
+
+static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
+ const struct hclge_mac_mgr_tbl_entry_cmd *req)
+{
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
+ memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
+}
+
+static int init_mgr_tbl(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
+ ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4090,6 +4397,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
/* mac addr check */
if (is_zero_ether_addr(new_addr) ||
@@ -4101,14 +4409,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
return -EINVAL;
}
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "remove old uc mac address fail, ret =%d.\n",
+ ret);
- if (!hclge_add_uc_addr(handle, new_addr)) {
- ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
- return 0;
+ ret = hclge_add_uc_addr(handle, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add uc mac address fail, ret =%d.\n",
+ ret);
+
+ ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore uc mac address fail, ret =%d.\n",
+ ret);
+ }
+
+ return -EIO;
}
- return -EIO;
+ ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "configure mac pause address fail, ret =%d.\n",
+ ret);
+ return -EIO;
+ }
+
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+
+ return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4134,6 +4467,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return 0;
}
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+}
+
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
@@ -4250,43 +4594,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+ vcfg->accept_tag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+ vcfg->accept_untag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port txvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port rxvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+ struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+ rx_req->ot_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+ rx_req->ot_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+ rx_req->in_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+ rx_req->in_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Send rxvlan protocol type command fail, ret =%d\n",
+ status);
+ return status;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+ tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+ tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send txvlan protocol type command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
-#define HCLGE_VLAN_TYPE_VF_TABLE 0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_DEF_VLAN_TYPE 0x8100
+
struct hnae3_handle *handle;
+ struct hclge_vport *vport;
int ret;
+ int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
if (ret)
return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
if (ret)
return ret;
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+ ret = hclge_set_vlan_protocol_type(hdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->txvlan_cfg.accept_tag = true;
+ vport->txvlan_cfg.accept_untag = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_rx_offload_cfg(vport);
+ if (ret)
+ return ret;
+ }
+
handle = &hdev->vport[0].nic;
return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ int max_frm_size;
int ret;
- if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
return -EINVAL;
- hdev->mps = new_mtu;
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(new_mtu);
+ req->max_frm_size = cpu_to_le16(max_frm_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -4294,6 +4799,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
return ret;
}
+ hdev->mps = max_frm_size;
+
return 0;
}
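To make the frame-size math above concrete: for the default MTU of 1500 (ETH_DATA_LEN), max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 octets, which is then clamped up to at least HCLGE_MAC_DEFAULT_FRAME before being programmed into the hardware and cached in hdev->mps.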
@@ -4341,7 +4848,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4392,6 +4899,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+ u32 *flowctrl_adv)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+ (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (rx_en)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+ if (tx_en)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ int ret;
+
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ return 0;
+
+ ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+
+ return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u16 remote_advertising = 0;
+ u16 local_advertising = 0;
+ u32 rx_pause, tx_pause;
+ u8 flowctl;
+
+ if (!phydev->link || !phydev->autoneg)
+ return 0;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ local_advertising = ADVERTISE_PAUSE_CAP;
+
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+ if (phydev->pause)
+ remote_advertising = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_advertising |= LPA_PAUSE_ASYM;
+
+ flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+ remote_advertising);
+ tx_pause = flowctl & FLOW_CTRL_TX;
+ rx_pause = flowctl & FLOW_CTRL_RX;
+
+ if (phydev->duplex == HCLGE_MAC_HALF) {
+ tx_pause = 0;
+ rx_pause = 0;
+ }
+
+ return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
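For context on the resolution step: mii_resolve_flowctrl_fdx() is the generic helper from linux/mii.h implementing the 802.3 pause resolution rules. If both sides advertise symmetric pause, the result is FLOW_CTRL_TX | FLOW_CTRL_RX; when only asymmetric pause is in common, the side that also advertises symmetric pause receives pause frames (FLOW_CTRL_RX) while its partner transmits them (FLOW_CTRL_TX). For example, if the local PHY advertises only Asym_Pause while the link partner advertises both, the call resolves to FLOW_CTRL_TX for the local side.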
+
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
u32 *rx_en, u32 *tx_en)
{
@@ -4421,6 +5022,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ u32 rx_en, u32 tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u32 fc_autoneg;
+
+ /* For now, flow control negotiation is only supported for
+ * netdevs with a PHY attached.
+ */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ dev_info(&hdev->pdev->dev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+ if (!fc_autoneg)
+ return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+ return phy_start_aneg(phydev);
+}
+
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex)
{
@@ -4661,6 +5297,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_request = 0;
+ hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -4768,16 +5406,28 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
+ return ret;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+ INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -4889,25 +5539,471 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- hclge_free_vector(hdev, 0);
hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
ae_dev->priv = NULL;
}
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+ ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *free_tqps, u16 *max_rss_size)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 temp_tqps = 0;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ if (!hdev->htqp[i].alloced)
+ temp_tqps++;
+ }
+ *free_tqps = temp_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+ tqp->q.handle = NULL;
+ tqp->q.tqp_index = 0;
+ tqp->alloced = false;
+ }
+
+ devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+ kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int cur_rss_size = kinfo->rss_size;
+ int cur_tqps = kinfo->num_tqps;
+ u16 tc_offset[HCLGE_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 tc_size[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ u32 *rss_indir;
+ int ret, i;
+
+ hclge_release_tqp(vport);
+
+ ret = hclge_knic_setup(vport, new_tqps_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ roundup_size = roundup_pow_of_two(kinfo->rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = kinfo->rss_size * i;
+ }
+ ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ if (ret)
+ return ret;
+
+ /* Reinitialize the RSS indirection table according to the new RSS size */
+ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+ return ret;
+}
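+
+ /* Worked example for the sizing above (for reference): with
+ * kinfo->rss_size = 10, roundup_pow_of_two(10) = 16 and
+ * ilog2(16) = 4, so every enabled TC gets tc_size = 4 (2^4 = 16
+ * queue slots) at offset rss_size * i. The indirection table is
+ * then striped round-robin, entry j mapping to queue j % 10.
+ */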
+
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
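+
+ /* Descriptor accounting for the query above: each descriptor carries
+ * HCLGE_32_BIT_REG_RTN_DATANUM (8) 32-bit words, but the first one
+ * loses two words to the command header, hence "regs_num + 2" in the
+ * DIV_ROUND_UP(). E.g. regs_num = 20 needs DIV_ROUND_UP(22, 8) = 3
+ * descriptors: 6 values from the first, then 8 and 6 from the rest.
+ */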
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return -EOPNOTSUPP;
+ }
+
+ return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+
+ data = (u32 *)data + regs_num_32_bit;
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
+ data);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+}
+
+static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
+ u8 act_led_status, u8 link_led_status,
+ u8 locate_led_status)
+{
+ struct hclge_set_led_state_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
+
+ req = (struct hclge_set_led_state_cmd *)desc.data;
+ hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
+ HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
+ hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
+ HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
+ hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
+ HCLGE_LED_LINK_STATE_S, link_led_status);
+ hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Send set led state cmd error, ret =%d\n", ret);
+
+ return ret;
+}
+
+enum hclge_led_status {
+ HCLGE_LED_OFF,
+ HCLGE_LED_ON,
+ HCLGE_LED_NO_CHANGE = 0xFF,
+};
+
+static int hclge_set_led_id(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status)
+{
+#define BLINK_FREQUENCY 2
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret = 0;
+
+ if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ switch (status) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_ON);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_OFF);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+enum hclge_led_port_speed {
+ HCLGE_SPEED_LED_FOR_1G,
+ HCLGE_SPEED_LED_FOR_10G,
+ HCLGE_SPEED_LED_FOR_25G,
+ HCLGE_SPEED_LED_FOR_40G,
+ HCLGE_SPEED_LED_FOR_50G,
+ HCLGE_SPEED_LED_FOR_100G,
+};
+
+static u8 hclge_led_get_speed_status(u32 speed)
+{
+ u8 speed_led;
+
+ switch (speed) {
+ case HCLGE_MAC_SPEED_1G:
+ speed_led = HCLGE_SPEED_LED_FOR_1G;
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ speed_led = HCLGE_SPEED_LED_FOR_10G;
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ speed_led = HCLGE_SPEED_LED_FOR_25G;
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ speed_led = HCLGE_SPEED_LED_FOR_40G;
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ speed_led = HCLGE_SPEED_LED_FOR_50G;
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ speed_led = HCLGE_SPEED_LED_FOR_100G;
+ break;
+ default:
+ speed_led = HCLGE_LED_NO_CHANGE;
+ }
+
+ return speed_led;
+}
+
+static int hclge_update_led_status(struct hclge_dev *hdev)
+{
+ u8 port_speed_status, link_status, activity_status;
+ u64 rx_pkts, tx_pkts;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
+ port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
+
+ rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
+ tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
+ if (rx_pkts != hdev->rx_pkts_for_led ||
+ tx_pkts != hdev->tx_pkts_for_led)
+ activity_status = HCLGE_LED_ON;
+ else
+ activity_status = HCLGE_LED_OFF;
+ hdev->rx_pkts_for_led = rx_pkts;
+ hdev->tx_pkts_for_led = tx_pkts;
+
+ if (hdev->hw.mac.link)
+ link_status = HCLGE_LED_ON;
+ else
+ link_status = HCLGE_LED_OFF;
+
+ return hclge_set_led_status_sfp(hdev, port_speed_status,
+ activity_status, link_status,
+ HCLGE_LED_NO_CHANGE);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
- .map_ring_to_vector = hclge_map_handle_ring_to_vector,
- .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+ .map_ring_to_vector = hclge_map_ring_to_vector,
+ .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
.get_vector = hclge_get_vector,
.set_promisc_mode = hclge_set_promisc_mode,
.set_loopback = hclge_set_loopback,
@@ -4934,6 +6030,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
@@ -4942,9 +6039,18 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_sset_count = hclge_get_sset_count,
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+ .set_channels = hclge_set_channels,
+ .get_channels = hclge_get_channels,
+ .get_flowctrl_adv = hclge_get_flowctrl_adv,
+ .get_regs_len = hclge_get_regs_len,
+ .get_regs = hclge_get_regs,
+ .set_led_id = hclge_set_led_id,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7027814ea5d7..d99a76a9557c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -79,6 +79,10 @@
#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_VF_NUM_PER_BYTE 8
+
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_GLOBAL_RESET_REG 0x20A00
@@ -92,6 +96,16 @@
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME 64
+#define HCLGE_MAC_MAX_FRAME 9728
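+
+/* For reference, HCLGE_MAC_DEFAULT_FRAME expands to 14 (ETH_HLEN) +
+ * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) + 1500 (ETH_DATA_LEN) = 1522 bytes,
+ * i.e. a standard VLAN-tagged Ethernet frame.
+ */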
+
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN,
@@ -99,12 +113,20 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_REMOVING,
HCLGE_STATE_SERVICE_INITED,
HCLGE_STATE_SERVICE_SCHED,
+ HCLGE_STATE_RST_SERVICE_SCHED,
+ HCLGE_STATE_RST_HANDLING,
+ HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
- HCLGE_STATE_MBX_IRQ,
- HCLGE_STATE_RESET_INT,
+ HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_MAX
};
+enum hclge_evt_cause {
+ HCLGE_VECTOR0_EVENT_RST,
+ HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_OTHER,
+};
+
#define HCLGE_MPF_ENBALE 1
struct hclge_caps {
u16 num_tqp;
@@ -208,6 +230,7 @@ struct hclge_cfg {
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
+ u16 rss_size_max;
u8 phy_addr;
u8 media_type;
u8 mac_addr[ETH_ALEN];
@@ -364,14 +387,23 @@ struct hclge_mac_stats {
u64 mac_tx_multi_pkt_num;
u64 mac_tx_broad_pkt_num;
u64 mac_tx_undersize_pkt_num;
- u64 mac_tx_overrsize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
u64 mac_tx_64_oct_pkt_num;
u64 mac_tx_65_127_oct_pkt_num;
u64 mac_tx_128_255_oct_pkt_num;
u64 mac_tx_256_511_oct_pkt_num;
u64 mac_tx_512_1023_oct_pkt_num;
u64 mac_tx_1024_1518_oct_pkt_num;
- u64 mac_tx_1519_max_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
+
u64 mac_rx_total_pkt_num;
u64 mac_rx_total_oct_num;
u64 mac_rx_good_pkt_num;
@@ -382,33 +414,52 @@ struct hclge_mac_stats {
u64 mac_rx_multi_pkt_num;
u64 mac_rx_broad_pkt_num;
u64 mac_rx_undersize_pkt_num;
- u64 mac_rx_overrsize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
u64 mac_rx_64_oct_pkt_num;
u64 mac_rx_65_127_oct_pkt_num;
u64 mac_rx_128_255_oct_pkt_num;
u64 mac_rx_256_511_oct_pkt_num;
u64 mac_rx_512_1023_oct_pkt_num;
u64 mac_rx_1024_1518_oct_pkt_num;
- u64 mac_rx_1519_max_oct_pkt_num;
-
- u64 mac_trans_fragment_pkt_num;
- u64 mac_trans_undermin_pkt_num;
- u64 mac_trans_jabber_pkt_num;
- u64 mac_trans_err_all_pkt_num;
- u64 mac_trans_from_app_good_pkt_num;
- u64 mac_trans_from_app_bad_pkt_num;
- u64 mac_rcv_fragment_pkt_num;
- u64 mac_rcv_undermin_pkt_num;
- u64 mac_rcv_jabber_pkt_num;
- u64 mac_rcv_fcs_err_pkt_num;
- u64 mac_rcv_send_app_good_pkt_num;
- u64 mac_rcv_send_app_bad_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
};
+#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
struct hclge_64_bit_stats all_64_bit_stats;
struct hclge_32_bit_stats all_32_bit_stats;
+ u32 stats_timer;
+};
+
+struct hclge_vlan_type_cfg {
+ u16 rx_ot_fst_vlan_type;
+ u16 rx_ot_sec_vlan_type;
+ u16 rx_in_fst_vlan_type;
+ u16 rx_in_sec_vlan_type;
+ u16 tx_ot_vlan_type;
+ u16 tx_in_vlan_type;
};
struct hclge_dev {
@@ -420,6 +471,8 @@ struct hclge_dev {
unsigned long state;
enum hnae3_reset_type reset_type;
+ unsigned long reset_request; /* reset has been requested */
+ unsigned long reset_pending; /* client rst is pending to be served */
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -469,6 +522,8 @@ struct hclge_dev {
unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
+ struct work_struct rst_service_task;
+ struct work_struct mbx_service_task;
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
@@ -493,6 +548,29 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
bool enable_mta; /* Multicast filter enable */
bool accept_mta_mc; /* Whether accept mta filter multicast */
+
+ struct hclge_vlan_type_cfg vlan_type_cfg;
+
+ u64 rx_pkts_for_led;
+ u64 tx_pkts_for_led;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+ bool accept_tag; /* Whether to accept tagged packets from the host */
+ bool accept_untag; /* Whether to accept untagged packets from the host */
+ bool insert_tag1_en; /* Whether to insert the inner VLAN tag */
+ bool insert_tag2_en; /* Whether to insert the outer VLAN tag */
+ u16 default_tag1; /* The default inner VLAN tag to insert */
+ u16 default_tag2; /* The default outer VLAN tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+ bool strip_tag1_en; /* Whether to strip the inner VLAN tag */
+ bool strip_tag2_en; /* Whether to strip the outer VLAN tag */
+ bool vlan1_vlan_prionly; /* Inner VLAN tag up-to-descriptor enable */
+ bool vlan2_vlan_prionly; /* Outer VLAN tag up-to-descriptor enable */
};
struct hclge_vport {
@@ -507,6 +585,9 @@ struct hclge_vport {
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+ struct hclge_rx_vtag_cfg rxvlan_cfg;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
@@ -529,8 +610,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable);
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
- struct hnae3_ring_chain_node *ring_chain);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain);
+
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -544,4 +627,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 000000000000..f38fc5ce9f51
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+/* hclge_gen_resp_to_vf: used to generate a synchronous response to the VF
+ * when the PF receives a mailbox message from the VF.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to the hclge_mbx_vf_to_pf_cmd of the original
+ * mailbox message
+ * @resp_status: indicates to the VF whether its request succeeded (0) or
+ * failed
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+ int resp_status,
+ u8 *resp_data, u16 resp_data_len)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "PF fail to gen resp to VF: resp data len %d exceeds max len %d\n",
+ resp_data_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ /* bail out instead of overflowing desc.data in the memcpy below */
+ return -EINVAL;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+ resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+
+ resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
+ resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
+ resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
+ resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+
+ if (resp_data && resp_data_len > 0)
+ memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send response to VF\n", status);
+
+ return status;
+}
+
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+ u16 mbx_opcode, u8 dest_vfid)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = dest_vfid;
+ resp_pf_to_vf->msg_len = msg_len;
+ resp_pf_to_vf->msg[0] = mbx_opcode;
+
+ memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send mailbox message to VF\n",
+ status);
+
+ return status;
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head->next;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ kzfree(chain);
+ chain = chain_tmp;
+ }
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type, tqp id and int_gl index
+ * from the mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5]: first int_gl index
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl index
+ */
+static int hclge_get_ring_chain_from_mbx(
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM 3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ int ring_num;
+ int i;
+
+ ring_num = req->msg[2];
+
+ hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ ring_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+ hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[5]);
+
+ cur_chain = ring_chain;
+
+ for (i = 1; i < ring_num; i++) {
+ new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+ if (!new_chain)
+ goto err;
+
+ hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+ new_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+ hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]);
+
+ cur_chain->next = new_chain;
+ cur_chain = new_chain;
+ }
+
+ return 0;
+err:
+ hclge_free_vector_ring_chain(ring_chain);
+ return -ENOMEM;
+}
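+
+/* For illustration only (not part of this patch): the sending VF side
+ * has to lay the message out exactly as parsed above, i.e.
+ *
+ * msg[2] = ring_num;
+ * msg[3] = first ring type; msg[4] = first tqp id;
+ * msg[5] = first int_gl index;
+ * and then, for each extra ring i >= 1:
+ * msg[3 * i + 3] = type; msg[3 * i + 4] = tqp id;
+ * msg[3 * i + 5] = int_gl index;
+ */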
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ struct hnae3_ring_chain_node ring_chain;
+ int vector_id = req->msg[1];
+ int ret;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+ if (ret)
+ return ret;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return 0;
+}
+
+static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ bool en = req->msg[1] ? true : false;
+ struct hclge_promisc_param param;
+
+ /* always enable broadcast promisc bit */
+ hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ return hclge_cmd_set_promisc_mode(vport->back, &param);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+
+ hclge_rm_uc_addr_common(vport, old_addr);
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ status = hclge_rm_uc_addr_common(vport, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set unicast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ status = hclge_add_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ status = hclge_rm_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
+ u8 func_id = vport->vport_id;
+ bool enable = mbx_req->msg[2];
+
+ status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set mcast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int status = 0;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ u16 vlan, proto;
+ bool is_kill;
+
+ is_kill = !!mbx_req->msg[2];
+ memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
+ memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+ status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ is_kill, vlan, 0,
+ cpu_to_be16(proto));
+ }
+
+ if (gen_resp)
+ status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return status;
+}
+
+static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+ sizeof(u8));
+
+ return ret;
+}
+
+static int hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN 8
+ u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue related info */
+ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
+ memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16));
+ memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
+ memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ HCLGE_TQPS_RSS_INFO_LEN);
+}
+
+static int hclge_get_link_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 link_status;
+ u8 msg_data[2];
+ u8 dest_vfid;
+
+ /* mac.link can only be 0 or 1 */
+ link_status = (u16)hdev->hw.mac.link;
+ memcpy(&msg_data[0], &link_status, sizeof(u16));
+ dest_vfid = mbx_req->mbx_src_vfid;
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+}
+
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u16 queue_id;
+
+ memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+ hclge_reset_tqp(&vport->nic, queue_id);
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+ struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_vport *vport;
+ struct hclge_desc *desc;
+ int ret;
+
+ /* handle all the mailbox requests in the queue */
+ while (hnae_get_bit(crq->desc[crq->next_to_use].flag,
+ HCLGE_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+ vport = &hdev->vport[req->mbx_src_vfid];
+
+ switch (req->msg[0]) {
+ case HCLGE_MBX_MAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+ req);
+ break;
+ case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+ req);
+ break;
+ case HCLGE_MBX_SET_PROMISC_MODE:
+ ret = hclge_set_vf_promisc_mode(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF promisc mode\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_UNICAST:
+ ret = hclge_set_vf_uc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_MULTICAST:
+ ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_VLAN:
+ ret = hclge_set_vf_vlan_cfg(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_QINFO:
+ ret = hclge_get_vf_queue_info(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get Q info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_TCINFO:
+ ret = hclge_get_vf_tcinfo(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get TC info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_LINK_STATUS:
+ ret = hclge_get_link_info(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to get link stat for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_QUEUE_RESET:
+ hclge_reset_vf_queue(vport, req);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %d\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ }
+
+ /* Write back the CMDQ_RQ head pointer; the M7 firmware needs it */
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+}
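+
+/* In short, the loop above implements a valid-bit handshake on the
+ * CRQ: the IMP (M7) firmware sets HCLGE_CMDQ_RX_OUTVLD_B on each
+ * descriptor it fills, the PF consumes descriptors while that bit is
+ * set, and the final write of next_to_use to HCLGE_NIC_CRQ_HEAD_REG
+ * publishes how far the PF has read so the firmware can reuse those
+ * slots (a reading of the code, not a statement from the changelog).
+ */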
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 7069e9408d7d..c1dea3a47bdd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -17,6 +17,7 @@
#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
SUPPORTED_Pause | \
+ SUPPORTED_Asym_Pause | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+
+ ret = hclge_cfg_flowctrl(hdev);
+ if (ret)
+ netdev_err(netdev, "failed to configure flow control.\n");
}
int hclge_mac_start_phy(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 7bfa2e5497cb..36bd79a77940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
HCLGE_SHAPER_LVL_PF = 1,
};
-#define HCLGE_SHAPER_BS_U_DEF 1
-#define HCLGE_SHAPER_BS_S_DEF 4
+#define HCLGE_SHAPER_BS_U_DEF 5
+#define HCLGE_SHAPER_BS_S_DEF 20
#define HCLGE_ETHER_MAX_RATE 100000
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
return 0;
}
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
struct hclge_desc desc;
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+ ether_addr_copy(pause_param->mac_addr, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+ return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+ trans_time);
+}
+
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return hclge_tm_schd_mode_hw(hdev);
}
+static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
int ret;
u8 i;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_mac_pause_param_setup_hw(hdev);
+ }
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index bf59961918ab..5401e7559437 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
+
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
u8 pri_en_bitmap;
};
+struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+};
+
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
@@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
new file mode 100644
index 000000000000..fb93bbd35845
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 000000000000..85985e731311
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
+#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
+
+static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used;
+
+ used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
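+
+/* Occupancy example for the helper above (for reference): with
+ * desc_num = 1024, next_to_use = 5 and next_to_clean = 1020,
+ * used = (5 - 1020 + 1024) % 1024 = 9, so 1024 - 9 - 1 = 1014
+ * descriptors are free; one slot is always kept empty so that a full
+ * ring can be told apart from an empty one.
+ */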
+
+static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+{
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+ u16 ntc = csq->next_to_clean;
+ struct hclgevf_desc *desc;
+ int clean = 0;
+ u32 head;
+
+ desc = &csq->desc[ntc];
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+ while (head != ntc) {
+ memset(desc, 0, sizeof(*desc));
+ ntc++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ desc = &csq->desc[ntc];
+ clean++;
+ }
+ csq->next_to_clean = ntc;
+
+ return clean;
+}
+
+static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
+{
+ u32 head;
+
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclgevf_is_special_opcode(u16 opcode)
+{
+ u16 spec_opcode[] = {0x30, 0x31, 0x32};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+ if (spec_opcode[i] == opcode)
+ return true;
+ }
+
+ return false;
+}
+
+static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
+ size, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ hclgevf_ring_to_dma_dir(ring));
+
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
+ struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_hw *hw = &hdev->hw;
+ int ring_type = ring->flag;
+ u32 reg_val;
+ int ret;
+
+ ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ spin_lock_init(&ring->lock);
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->dev = hdev;
+
+ /* allocate CSQ/CRQ descriptor */
+ ret = hclgevf_alloc_cmd_desc(ring);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
+ (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
+ return ret;
+ }
+
+ /* initialize the hardware registers with csq/crq dma-address,
+ * descriptor number, head & tail pointers
+ */
+ switch (ring_type) {
+ case HCLGEVF_TYPE_CSQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ break;
+ case HCLGEVF_TYPE_CRQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ break;
+ }
+
+ return 0;
+}
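+
+/* Note on the base-address split above: the low 32 bits are written
+ * directly, while the high bits use (addr >> 31) >> 1 instead of a
+ * plain >> 32, which would be undefined when dma_addr_t is only
+ * 32 bits wide; the two-step shift safely yields 0 in that case
+ * (an explanatory note, not text from the patch).
+ */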
+
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode, bool is_read)
+{
+ memset(desc, 0, sizeof(struct hclgevf_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
+ HCLGEVF_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
+}
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue: it posts the
+ * descriptors, waits for a synchronous command to complete and then
+ * cleans the queue.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ struct hclgevf_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ int status = 0;
+ u16 retval;
+ u16 opcode;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of the descriptors in the ring for this
+ * submission; it will be used to read back the hardware's results
+ */
+ ntc = hw->cmq.csq.next_to_use;
+ opcode = le16_to_cpu(desc[0].opcode);
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ /* If the command is synchronous, wait for the firmware write-back;
+ * when multiple descriptors are sent, the first one is used for the
+ * completion check
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw))
+ break;
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (hclgevf_cmd_csq_done(hw)) {
+ complete = true;
+ handle = 0;
+
+ while (handle < num) {
+ /* Get the result of hardware write back */
+ desc_to_use = &hw->cmq.csq.desc[ntc];
+ desc[handle] = *desc_to_use;
+
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ retval = le16_to_cpu(desc[handle].retval);
+ else
+ retval = le16_to_cpu(desc[0].retval);
+
+ if ((enum hclgevf_cmd_return_status)retval ==
+ HCLGEVF_CMD_EXEC_SUCCESS)
+ status = 0;
+ else
+ status = -EIO;
+ hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
+ ntc++;
+ handle++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ }
+
+ if (!complete)
+ status = -EAGAIN;
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle != num) {
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ }
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return status;
+}
+
+static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
+ u32 *version)
+{
+ struct hclgevf_query_version_cmd *resp;
+ struct hclgevf_desc desc;
+ int status;
+
+ resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
+ status = hclgevf_cmd_send(hw, &desc, 1);
+ if (!status)
+ *version = le32_to_cpu(resp->firmware);
+
+ return status;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ /* setup Tx write back timeout */
+ hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+
+ /* setup queue CSQ/CRQ rings */
+ hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CSQ ring\n", ret);
+ return ret;
+ }
+
+ hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CRQ ring\n", ret);
+ goto err_csq;
+ }
+
+ /* get firmware version */
+ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to query firmware version\n", ret);
+ goto err_crq;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
+err_crq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+ return ret;
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 000000000000..2caca9317f8c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
+#define HCLGEVF_CMDQ_TX_TIMEOUT 200
+#define HCLGEVF_CMDQ_RX_INVLD_B 0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+struct hclgevf_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+ dma_addr_t dma;
+ void *va;
+ u32 length;
+};
+
+struct hclgevf_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclgevf_desc *desc;
+ struct hclgevf_desc_cb *desc_cb;
+ struct hclgevf_dev *dev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 flag;
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+ HCLGEVF_CMD_EXEC_SUCCESS = 0,
+ HCLGEVF_CMD_NO_AUTH = 1,
+ HCLGEVF_CMD_NOT_EXEC = 2,
+ HCLGEVF_CMD_QUEUE_FULL = 3,
+};
+
+enum hclgevf_cmd_status {
+ HCLGEVF_STATUS_SUCCESS = 0,
+ HCLGEVF_ERR_CSQ_FULL = -1,
+ HCLGEVF_ERR_CSQ_TIMEOUT = -2,
+ HCLGEVF_ERR_CSQ_ERROR = -3
+};
+
+struct hclgevf_cmq {
+ struct hclgevf_cmq_ring csq;
+ struct hclgevf_cmq_ring crq;
+ u16 tx_timeout; /* Tx timeout */
+ enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
+
+#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+ /* Generic command */
+ HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ /* TQP command */
+ HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* RSS cmd */
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
+ /* Mailbox cmd */
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET 0x80000
+#define HCLGEVF_TQP_REG_SIZE 0x200
+
+struct hclgevf_tqp_map {
+ __le16 tqp_id; /* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF 0
+#define HCLGEVF_TQP_MAP_TYPE_VF 1
+#define HCLGEVF_TQP_MAP_TYPE_B 0
+#define HCLGEVF_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclgevf_int_type {
+ HCLGEVF_INT_TX = 0,
+ HCLGEVF_INT_RX,
+ HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+ u8 int_vector_id;
+ u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S 0
+#define HCLGEVF_INT_TYPE_M 0x3
+#define HCLGEVF_TQP_ID_S 2
+#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
+ __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 resv;
+};
+
+struct hclgevf_query_version_cmd {
+ __le32 firmware;
+ __le32 firmware_rsv[5];
+};
+
+#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
+#define HCLGEVF_RSS_HASH_KEY_NUM 16
+struct hclgevf_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
+
+struct hclgevf_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_stcp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_stcp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE 16
+
+struct hclgevf_rss_indirection_table_cmd {
+ u16 start_table_index;
+ u16 rss_set_bitmap;
+ u8 rsv[4];
+ u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S 0
+#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S 12
+#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B 15
+#define HCLGEVF_MAX_TC_NUM 8
+struct hclgevf_rss_tc_mode_cmd {
+ u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ u8 rsv[8];
+};
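+
+/* Derived from the masks above (for reference): each rss_tc_mode[]
+ * word packs bit 15 = TC valid, bits 14:12 = TC size (log2 of the
+ * queue count) and bits 9:0 = TC queue offset; e.g. a TC of 16 queues
+ * starting at queue 32 encodes as BIT(15) | (4 << 12) | 32.
+ */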
+
+#define HCLGEVF_LINK_STS_B 0
+#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK 0x3ff
+#define HCLGEVF_TQP_ENABLE_B 0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
+#define HCLGEVF_TYPE_CRQ 0
+#define HCLGEVF_TYPE_CSQ 1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
+#define HCLGEVF_NIC_CMQ_EN_B 16
+#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+ hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+ hclgevf_read_reg((a)->io_base, (reg))
+
+#define HCLGEVF_SEND_SYNC(flag) \
+ ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode,
+ bool is_read);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 000000000000..0d89965f7928
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME "hclgevf"
+
+static struct hnae3_ae_algo ae_algovf;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
+ struct hnae3_handle *handle)
+{
+ return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_desc desc;
+ struct hclgevf_tqp *tqp;
+ int status;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_QUERY_RX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_tqp *tqp;
+ u64 *buff = data;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *buff = data;
+ int i = 0;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int status;
+
+ status = hclgevf_tqps_update_stats(handle);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF update of TQPS stats fail, status = %d.\n",
+ status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ if (strset == ETH_SS_TEST)
+ return -EOPNOTSUPP;
+ else if (strset == ETH_SS_STATS)
+ return hclgevf_tqps_get_sset_count(handle, strset);
+
+ return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+ u8 *data)
+{
+ u8 *p = data;
+
+ if (strset == ETH_SS_STATS)
+ p = hclgevf_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ hclgevf_tqps_get_stats(handle, data);
+}
+
+static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg;
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
+ true, &resp_msg, sizeof(u8));
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get TC info from PF failed %d",
+ status);
+ return status;
+ }
+
+ hdev->hw_tc_map = resp_msg;
+
+ return 0;
+}
+
+static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN 8
+ u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
+ true, resp_msg,
+ HCLGEVF_TQPS_RSS_INFO_LEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp info from PF failed %d",
+ status);
+ return status;
+ }
+
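+ /* the response packs num_tqps, rss_size_max, num_desc and rx_buf_len
+ * as four consecutive u16 values
+ */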
+ memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
+ memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
+ memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+
+ return 0;
+}
+
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algovf;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.desc_num = hdev->num_desc;
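+ /* each TQP owns a fixed-size register window after HCLGEVF_TQP_REG_OFFSET */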
+ tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
+ i * HCLGEVF_TQP_REG_SIZE;
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 new_tqps = hdev->num_tqps;
+ int i;
+
+ kinfo = &nic->kinfo;
+ kinfo->num_tc = 0;
+ kinfo->num_desc = hdev->num_desc;
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ kinfo->num_tc++;
+
+ kinfo->rss_size
+ = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
+ new_tqps = kinfo->rss_size * kinfo->num_tc;
+ kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ hdev->htqp[i].q.handle = &hdev->nic;
+ hdev->htqp[i].q.tqp_index = i;
+ kinfo->tqp[i] = &hdev->htqp[i].q;
+ }
+
+ return 0;
+}
+
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+ int status;
+ u8 resp_msg;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
+ 0, false, &resp_msg, sizeof(u8));
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *client;
+
+ client = handle->client;
+
+ if (link_state != hdev->hw.mac.link) {
+ client->ops->link_status_change(handle, !!link_state);
+ hdev->hw.mac.link = link_state;
+ }
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ nic->ae_algo = &ae_algovf;
+ nic->pdev = hdev->pdev;
+ nic->numa_node_mask = hdev->numa_node_mask;
+ nic->flags |= HNAE3_SUPPORT_VF;
+
+ if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
+ dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
+ hdev->ae_dev->dev_type);
+ return -EINVAL;
+ }
+
+ ret = hclgevf_knic_setup(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+ ret);
+ return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+ hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ int alloc = 0;
+ int i, j;
+
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+ for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+ if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+ vector->vector = pci_irq_vector(hdev->pdev, i);
+ vector->io_addr = hdev->hw.io_base +
+ HCLGEVF_VECTOR_REG_BASE +
+ (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+ hdev->vector_status[i] = 0;
+ hdev->vector_irq[i] = vector->vector;
+
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_KEY_SIZE;
+}
+
+static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_IND_TBL_SIZE;
+}
+
+static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
+{
+ const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
+ struct hclgevf_rss_indirection_table_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+ int i, j;
+
+ req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+
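+ /* the shadow table is programmed HCLGEVF_RSS_CFG_TBL_SIZE entries
+ * per descriptor
+ */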
+ for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
+ false);
+ req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
+ req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
+ req->rss_result[j] =
+ indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set RSS indirection table\n",
+ status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
+{
+ struct hclgevf_rss_tc_mode_cmd *req;
+ u16 tc_offset[HCLGEVF_MAX_TC_NUM];
+ u16 tc_valid[HCLGEVF_MAX_TC_NUM];
+ u16 tc_size[HCLGEVF_MAX_TC_NUM];
+ struct hclgevf_desc desc;
+ u16 roundup_size;
+ int status;
+ int i;
+
+ req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
+
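+ /* tc_size is programmed as ilog2() of rss_size rounded up to a power of two */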
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+ tc_size[i] = roundup_size;
+ tc_offset[i] = rss_size * i;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ }
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set rss tc mode\n", status);
+
+ return status;
+}
+
+static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
+ u8 *key)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_config_cmd *req;
+ int lkup_times = key ? 3 : 1;
+ struct hclgevf_desc desc;
+ int key_offset;
+ int key_size;
+ int status;
+
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
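+ /* the hash key is read back HCLGEVF_RSS_HASH_KEY_NUM bytes per
+ * descriptor: 3 reads for the whole key, 1 read when only the hash
+ * algorithm is wanted
+ */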
+ lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
+
+ for (key_offset = 0; key_offset < lkup_times; key_offset++) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ true);
+ req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get hardware RSS cfg, status = %d\n",
+ status);
+ return status;
+ }
+
+ if (key_offset == 2)
+ key_size =
+ HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+ else
+ key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+ if (key)
+ memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
+ req->hash_key,
+ key_size);
+ }
+
+ if (hash) {
+ if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
+ *hash = ETH_RSS_HASH_TOP;
+ else
+ *hash = ETH_RSS_HASH_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ if (indir)
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+
+ return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ /* update the shadow RSS table with user specified qids */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+ /* update the hardware */
+ return hclgevf_set_rss_indir_table(hdev);
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+ return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+#define HCLGEVF_RING_NODE_VARIABLE_NUM 3
+#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ring_chain_node *node;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int i, vector_id;
+ int status;
+ u8 type;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ type = en ?
+ HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ req->msg[0] = type;
+ req->msg[1] = vector_id; /* vector_id should be id in VF */
+
+ i = 0;
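+ /* each ring consumes HCLGEVF_RING_NODE_VARIABLE_NUM message bytes;
+ * send the descriptor and start a new one whenever the mailbox
+ * payload is full
+ */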
+ for (node = ring_chain; node; node = node->next) {
+ i++;
+ /* msg[2] is reserved for the ring count; ring data starts at msg[3] */
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
+ node->tqp_index;
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
+
+ if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
+ HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
+ HCLGEVF_RING_NODE_VARIABLE_NUM) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return status;
+ }
+ i = 0;
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_MBX_VF_TO_PF,
+ false);
+ req->msg[0] = type;
+ req->msg[1] = vector_id;
+ }
+ }
+
+ if (i > 0) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n", status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+ struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret, vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vector=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
+ }
+
+ hclgevf_free_vector(hdev, vector);
+
+ return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
+ req->msg[1] = en;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Set promisc mode fail, status is %d.\n", status);
+
+ return status;
+}
+
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ hclgevf_cmd_set_promisc_mode(hdev, en);
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+ int stream_id, bool enable)
+{
+ struct hclgevf_cfg_com_tqp_queue_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
+ false);
+ req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+ req->stream_id = cpu_to_le16(stream_id);
+ req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "TQP enable fail, status =%d.\n", status);
+
+ return status;
+}
+
+static int hclgevf_get_queue_id(struct hnae3_queue *queue)
+{
+ struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
+
+ return tqp->index;
+}
+
+static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
+
+static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg[2] = {0};
+
+ msg[0] = en;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
+ msg, 1, false, NULL, 0);
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ u8 *new_mac_addr = (u8 *)p;
+ u8 msg_data[ETH_ALEN * 2];
+ int status;
+
+ ether_addr_copy(msg_data, new_mac_addr);
+ ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY,
+ msg_data, ETH_ALEN * 2,
+ false, NULL, 0);
+ if (!status)
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+ return status;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ __be16 proto, u16 vlan_id,
+ bool is_kill)
+{
+#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+
+ if (vlan_id > 4095)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ msg_data[0] = is_kill;
+ memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
+ memcpy(&msg_data[3], &proto, sizeof(proto));
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+}
+
+static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
+ NULL, 0);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev,
+ HCLGEVF_MISC_VECTOR_NUM);
+ vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ /* vector status always valid for Vector 0 */
+ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+ hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->service_task);
+}
+
+static void hclgevf_service_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
+
+ mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+
+ hclgevf_task_schedule(hdev);
+}
+
+static void hclgevf_mailbox_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclgevf_mbx_handler(hdev);
+
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, service_task);
+
+ /* request the link status from the PF. The PF will be able to push
+ * such updates in the future, so this polling may be removed later.
+ */
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+{
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return true;
+ }
+
+ dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+
+ return false;
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ if (!hclgevf_check_event_cause(hdev, &clearval))
+ goto skip_sched;
+
+ /* schedule the VF mailbox service task, if not already scheduled */
+ hclgevf_mbx_task_schedule(hdev);
+
+ hclgevf_clear_event_cause(hdev, clearval);
+
+skip_sched:
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ /* get queue configuration from PF */
+ ret = hclgevf_get_queue_info(hdev);
+ if (ret)
+ return ret;
+ /* get tc configuration from PF */
+ return hclgevf_get_tc_info(hdev);
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *roce = &hdev->roce;
+ struct hnae3_handle *nic = &hdev->nic;
+
+ roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+
+ if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+ hdev->num_msi_left == 0)
+ return -EINVAL;
+
+ roce->rinfo.base_vector =
+ hdev->vector_status[hdev->num_msi_used];
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = hdev->hw.io_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i, ret;
+
+ rss_cfg->rss_size = hdev->rss_size_max;
+
+ /* Initialize RSS indirect table for each vport */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
+
+ ret = hclgevf_set_rss_indir_table(hdev);
+ if (ret)
+ return ret;
+
+ return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+ /* other VLAN configuration (e.g. VLAN TX/RX offload) will also be
+ * added here later
+ */
+ return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+ false);
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ /* ring enable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, true);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+
+ return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ /* Ring disable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, false);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+ /* setup tasks for the MBX */
+ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+
+ /* setup tasks for service timer */
+ timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
+
+ INIT_WORK(&hdev->service_task, hclgevf_service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+
+ mutex_init(&hdev->mbx_resp.mbx_mutex);
+
+ /* bring the device down */
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+
+ mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+ hdev->base_msi_vector = pdev->irq;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ hclgevf_get_misc_vector(hdev);
+
+ ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+ 0, "hclgevf_cmd", hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+ hdev->misc_vector.vector_irq);
+ return ret;
+ }
+
+ /* enable the misc vector (vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+ /* disable the misc vector (vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclgevf_free_vector(hdev, 0);
+}
+
+static int hclgevf_init_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ struct hnae3_client *rc = hdev->roce_client;
+
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+ ret = rc->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ break;
+ case HNAE3_CLIENT_UNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ /* un-init roce, if it exists */
+ if (hdev->roce_client)
+ hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+
+ /* un-init nic/unic, if this was not called by roce client */
+ if ((client->ops->uninit_instance) &&
+ (client->type != HNAE3_CLIENT_ROCE))
+ client->ops->uninit_instance(&hdev->nic, 0);
+}
+
+static int hclgevf_register_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ return hclgevf_init_instance(hdev, client);
+}
+
+static void hclgevf_unregister_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_uninit_instance(hdev, client);
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ goto err_no_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->hdev = hdev;
+ hw->io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->io_base) {
+ dev_err(&pdev->dev, "can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_clr_master;
+ }
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_no_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
+ int ret;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ ae_dev->priv = hdev;
+
+ ret = hclgevf_pci_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI initialization failed\n");
+ return ret;
+ }
+
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+ goto err_irq_init;
+ }
+
+ hclgevf_state_init(hdev);
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ goto err_misc_irq_init;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ ret = hclgevf_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_set_handle_info(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize VF's MTA */
+ hdev->accept_mta_mc = true;
+ ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to set mta filter mode\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize RSS for this VF */
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ goto err_config;
+ }
+
+ pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+
+ return 0;
+
+err_config:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
+ hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+err_irq_init:
+ hclgevf_pci_uninit(hdev);
+ return ret;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_cmd_uninit(hdev);
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ hclgevf_pci_uninit(hdev);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+ ch->combined_count = hdev->num_tqps;
+}
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+ .init_ae_dev = hclgevf_init_ae_dev,
+ .uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .init_client_instance = hclgevf_register_client,
+ .uninit_client_instance = hclgevf_unregister_client,
+ .start = hclgevf_ae_start,
+ .stop = hclgevf_ae_stop,
+ .map_ring_to_vector = hclgevf_map_ring_to_vector,
+ .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+ .get_vector = hclgevf_get_vector,
+ .reset_queue = hclgevf_reset_tqp,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
+ .get_mac_addr = hclgevf_get_mac_addr,
+ .set_mac_addr = hclgevf_set_mac_addr,
+ .add_uc_addr = hclgevf_add_uc_addr,
+ .rm_uc_addr = hclgevf_rm_uc_addr,
+ .add_mc_addr = hclgevf_add_mc_addr,
+ .rm_mc_addr = hclgevf_rm_mc_addr,
+ .get_stats = hclgevf_get_stats,
+ .update_stats = hclgevf_update_stats,
+ .get_strings = hclgevf_get_strings,
+ .get_sset_count = hclgevf_get_sset_count,
+ .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_indir_size = hclgevf_get_rss_indir_size,
+ .get_rss = hclgevf_get_rss,
+ .set_rss = hclgevf_set_rss,
+ .get_tc_size = hclgevf_get_tc_size,
+ .get_fw_version = hclgevf_get_fw_version,
+ .set_vlan_filter = hclgevf_set_vlan_filter,
+ .get_channels = hclgevf_get_channels,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+ .ops = &hclgevf_ops,
+ .name = HCLGEVF_NAME,
+ .pdev_id_table = ae_algovf_pci_tbl,
+};
+
+static int hclgevf_init(void)
+{
+ pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+ return hnae3_register_ae_algo(&ae_algovf);
+}
+
+static void hclgevf_exit(void)
+{
+ hnae3_unregister_ae_algo(&ae_algovf);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 000000000000..a63bee4a3674
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+
+#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_ROCEE_VECTOR_NUM 0
+#define HCLGEVF_MISC_VECTOR_NUM 0
+
+#define HCLGEVF_INVALID_VPORT 0xffff
+
+/* The actual number of vectors depends on the total number of VFs
+ * created by the physical function, but the maximum number of
+ * vectors per VF is {VFn(1-32), VECTn(32 + 1)}.
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE 0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET 0x4
+#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES 10
+
+#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
+#define HCLGEVF_RSS_KEY_SIZE 40
+#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
+#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
+#define HCLGEVF_RSS_CFG_TBL_NUM \
+ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+ /* device states */
+ HCLGEVF_STATE_DOWN,
+ HCLGEVF_STATE_DISABLED,
+ /* task states */
+ HCLGEVF_STATE_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_HANDLING,
+};
+
+#define HCLGEVF_MPF_ENABLE 1
+
+struct hclgevf_mac {
+ u8 mac_addr[ETH_ALEN];
+ int link;
+};
+
+struct hclgevf_hw {
+ void __iomem *io_base;
+ int num_vec;
+ struct hclgevf_cmq cmq;
+ struct hclgevf_mac mac;
+ void *hdev; /* hclgevf device it is part of */
+};
+
+/* TQP stats */
+struct hclgevf_tqp_stats {
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclgevf_tqp {
+ struct device *dev; /* device for DMA mapping */
+ struct hnae3_queue q;
+ struct hclgevf_tqp_stats tqp_stats;
+ u16 index; /* global index in a NIC controller */
+
+ bool alloced;
+};
+
+struct hclgevf_cfg {
+ u8 vmdq_vport_num;
+ u8 tc_num;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u32 numa_node_map;
+};
+
+struct hclgevf_rss_cfg {
+ u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
+ u32 hash_algo;
+ u32 rss_size;
+ u8 hw_tc_map;
+ u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+};
+
+struct hclgevf_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+};
+
+struct hclgevf_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclgevf_hw hw;
+ struct hclgevf_misc_vector misc_vector;
+ struct hclgevf_rss_cfg rss_cfg;
+ unsigned long state;
+
+ u32 fw_version;
+ u16 num_tqps; /* num task queue pairs of this VF */
+
+ u16 alloc_rss_size; /* allocated RSS task queue */
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 num_alloc_vport; /* num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_desc;
+ u8 hw_tc_map;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u32 base_msi_vector;
+ u16 *vector_status;
+ int *vector_irq;
+
+ bool accept_mta_mc; /* whether to accept mta filter multicast */
+ struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
+ struct work_struct mbx_service_task;
+
+ struct hclgevf_tqp *htqp;
+
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+ u32 flag;
+};
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 000000000000..e39cad285fa9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+ /* this function should be called with mbx_resp.mbx_mutex held
+ * to protect received_resp from a race condition
+ */
+ hdev->mbx_resp.received_resp = false;
+ hdev->mbx_resp.origin_mbx_msg = 0;
+ hdev->mbx_resp.resp_status = 0;
+ memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from the PF after the VF sends
+ * a mailbox message to the PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: code of the original request, used to match the response
+ * @code1: subcode of the original request
+ * @resp_data: buffer for the response payload
+ * @resp_len: length of the resp_data buffer
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES 500
+#define HCLGEVF_SLEEP_USECOND 1000
+ struct hclgevf_mbx_resp_status *mbx_resp;
+ u16 r_code0, r_code1;
+ int i = 0;
+
+ if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+ resp_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ return -EINVAL;
+ }
+
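+ /* poll up to HCLGEVF_MAX_TRY_TIMES times, sleeping between attempts,
+ * for the PF to flag a received response
+ */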
+ while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ udelay(HCLGEVF_SLEEP_USECOND);
+ i++;
+ }
+
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx resp(=%d) from PF in %d tries\n",
+ hdev->mbx_resp.received_resp, i);
+ return -EIO;
+ }
+
+ mbx_resp = &hdev->mbx_resp;
+ r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+ r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+ if (resp_data)
+ memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+ hclgevf_reset_mbx_resp_status(hdev);
+
+ if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp code(code0=%d,code1=%d), %d",
+ code0, code1, mbx_resp->resp_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ /* first two bytes are reserved for code & subcode */
+ if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+ dev_err(&hdev->pdev->dev,
+ "VF send mbx msg fail, msg len %d exceeds max len %d\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EINVAL;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = code;
+ req->msg[1] = subcode;
+ memcpy(&req->msg[2], msg_data, msg_len);
+
+ /* synchronous send */
+ if (need_resp) {
+ mutex_lock(&hdev->mbx_resp.mbx_mutex);
+ hclgevf_reset_mbx_resp_status(hdev);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ return status;
+ }
+
+ status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+ resp_len);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ } else {
+ /* asynchronous send */
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ return status;
+ }
+ }
+
+ return status;
+}
+
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_mbx_resp_status *resp;
+ struct hclge_mbx_pf_to_vf_cmd *req;
+ struct hclgevf_cmq_ring *crq;
+ struct hclgevf_desc *desc;
+ u16 link_status, flag;
+ u8 *temp;
+ int i;
+
+ resp = &hdev->mbx_resp;
+ crq = &hdev->hw.cmq.crq;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+ switch (req->msg[0]) {
+ case HCLGE_MBX_PF_VF_RESP:
+ if (resp->received_resp)
+ dev_warn(&hdev->pdev->dev,
+ "VF mbx resp flag not clear(%d)\n",
+ req->msg[1]);
+ resp->received_resp = true;
+
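+ /* msg[1]/msg[2] echo the code/subcode of the original VF request;
+ * msg[3] carries the response status
+ */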
+ resp->origin_mbx_msg = (req->msg[1] << 16);
+ resp->origin_mbx_msg |= req->msg[2];
+ resp->resp_status = req->msg[3];
+
+ temp = (u8 *)&req->msg[4];
+ for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+ resp->additional_info[i] = *temp;
+ temp++;
+ }
+ break;
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ link_status = le16_to_cpu(req->msg[1]);
+
+ /* update the upper layer with the new link status */
+ hclgevf_update_link_status(hdev, link_status);
+
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "VF received unsupported(%d) mbx msg from PF\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ }
+
+ /* Write back the CMDQ_RQ head pointer; M7 needs this pointer */
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 241db3199b88..354c0982847b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline int emac_phy_gpcs(int phy_mode)
{
- return phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
@@ -2871,7 +2871,7 @@ static int emac_init_config(struct emac_instance *dev)
/* PHY mode needs some decoding */
dev->phy_mode = of_get_phy_mode(np);
if (dev->phy_mode < 0)
- dev->phy_mode = PHY_MODE_NA;
+ dev->phy_mode = PHY_INTERFACE_MODE_NA;
/* Check EMAC version */
if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3174,7 +3174,7 @@ static int emac_probe(struct platform_device *ofdev)
printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
ndev->name, dev->cell_index, np, ndev->dev_addr);
- if (dev->phy_mode == PHY_MODE_SGMII)
+ if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
if (dev->phy.address >= 0)
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index c26d2631ca30..e2f80cca9bed 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -104,19 +104,6 @@ struct emac_regs {
} u1;
};
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
-
/* EMACx_MR0 */
#define EMAC_MR0_RXI 0x80000000
#define EMAC_MR0_TXI 0x40000000
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 35865d05fccd..aa070c063e48 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy)
if ((val & BMCR_ISOLATE) && limit > 0)
gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
- if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+ if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
/* Configure GPCS interface to recommended setting for SGMII */
gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy)
epcr &= ~EPCR_MODE_MASK;
switch (phy->mode) {
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
epcr |= EPCR_TBI_MODE;
break;
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
epcr |= EPCR_RTBI_MODE;
break;
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
epcr |= EPCR_GMII_MODE;
break;
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
default:
epcr |= EPCR_RGMII_MODE;
}
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index c4a1ac38bba8..00f5999de3cf 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -52,43 +52,28 @@
/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
static inline int rgmii_valid_mode(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_MII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
-}
-
-static inline const char *rgmii_mode_name(int mode)
-{
- switch (mode) {
- case PHY_MODE_RGMII:
- return "RGMII";
- case PHY_MODE_TBI:
- return "TBI";
- case PHY_MODE_GMII:
- return "GMII";
- case PHY_MODE_MII:
- return "MII";
- case PHY_MODE_RTBI:
- return "RTBI";
- default:
- BUG();
- }
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_MII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline u32 rgmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
return RGMII_FER_RGMII(input);
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
return RGMII_FER_TBI(input);
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
return RGMII_FER_GMII(input);
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return RGMII_FER_MII(input);
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
return RGMII_FER_RTBI(input);
default:
BUG();
@@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
- ofdev->dev.of_node, input, rgmii_mode_name(mode));
+ ofdev->dev.of_node, input, phy_modes(mode));
++dev->users;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 89c42d362292..fdcc734541fe 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -49,20 +49,20 @@
*/
static inline int zmii_valid_mode(int mode)
{
- return mode == PHY_MODE_MII ||
- mode == PHY_MODE_RMII ||
- mode == PHY_MODE_SMII ||
- mode == PHY_MODE_NA;
+ return mode == PHY_INTERFACE_MODE_MII ||
+ mode == PHY_INTERFACE_MODE_RMII ||
+ mode == PHY_INTERFACE_MODE_SMII ||
+ mode == PHY_INTERFACE_MODE_NA;
}
static inline const char *zmii_mode_name(int mode)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return "MII";
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return "RMII";
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return "SMII";
default:
BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode)
static inline u32 zmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return ZMII_FER_MII(input);
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return ZMII_FER_RMII(input);
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return ZMII_FER_SMII(input);
default:
return 0;
@@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
* Please, always specify PHY mode in your board port to avoid
* any surprises.
*/
- if (dev->mode == PHY_MODE_NA) {
- if (*mode == PHY_MODE_NA) {
+ if (dev->mode == PHY_INTERFACE_MODE_NA) {
+ if (*mode == PHY_INTERFACE_MODE_NA) {
u32 r = dev->fer_save;
ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
- dev->mode = PHY_MODE_MII;
+ dev->mode = PHY_INTERFACE_MODE_MII;
else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
- dev->mode = PHY_MODE_RMII;
+ dev->mode = PHY_INTERFACE_MODE_RMII;
else
- dev->mode = PHY_MODE_SMII;
- } else
+ dev->mode = PHY_INTERFACE_MODE_SMII;
+ } else {
dev->mode = *mode;
-
+ }
printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
ofdev->dev.of_node,
zmii_mode_name(dev->mode));
} else {
/* All inputs must use the same mode */
- if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+ if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
printk(KERN_ERR
"%pOF: invalid mode %d specified for input %d\n",
ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev)
mutex_init(&dev->lock);
dev->ofdev = ofdev;
- dev->mode = PHY_MODE_NA;
+ dev->mode = PHY_INTERFACE_MODE_NA;
rc = -ENXIO;
if (of_address_to_resource(np, 0, &regs)) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b65f5f3ac034..8c3058d5d191 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -59,6 +59,7 @@
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -1177,6 +1178,9 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
hdr_len[2] = tcp_hdrlen(skb);
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_ARP)) {
+ hdr_len[1] = arp_hdr_len(skb->dev);
+ hdr_len[2] = 0;
}
memset(hdr_data, 0, 120);
@@ -1412,7 +1416,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
/* determine if l2/3/4 headers are sent to firmware */
if ((*hdrs >> 7) & 1 &&
(skb->protocol == htons(ETH_P_IP) ||
- skb->protocol == htons(ETH_P_IPV6))) {
+ skb->protocol == htons(ETH_P_IPV6) ||
+ skb->protocol == htons(ETH_P_ARP))) {
build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->indir_arr[0] = tx_crq;
@@ -1543,15 +1548,19 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+
+ init_completion(&adapter->fw_done);
ibmvnic_send_crq(adapter, &crq);
+ wait_for_completion(&adapter->fw_done);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- return 0;
+ return adapter->fw_done_rc ? -EIO : 0;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ int rc;
if (adapter->state == VNIC_PROBED) {
memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
@@ -1559,9 +1568,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
- __ibmvnic_set_mac(netdev, addr);
+ rc = __ibmvnic_set_mac(netdev, addr);
- return 0;
+ return rc;
}
/**
@@ -2484,6 +2493,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
struct ibmvnic_sub_crq_queue *scrq = instance;
struct ibmvnic_adapter *adapter = scrq->adapter;
+ /* When booting a kdump kernel we can hit pending interrupts
+ * prior to completing driver initialization.
+ */
+ if (unlikely(adapter->state != VNIC_OPEN))
+ return IRQ_NONE;
+
adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
@@ -3558,8 +3573,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
-static void handle_change_mac_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
+static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->vdev->dev;
@@ -3568,10 +3583,13 @@ static void handle_change_mac_rsp(union ibmvnic_crq *crq,
rc = crq->change_mac_addr_rsp.rc.code;
if (rc) {
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
- return;
+ goto out;
}
memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
ETH_ALEN);
+out:
+ complete(&adapter->fw_done);
+ return rc;
}
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
@@ -4031,7 +4049,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
break;
case CHANGE_MAC_ADDR_RSP:
netdev_dbg(netdev, "Got MAC address change Response\n");
- handle_change_mac_rsp(crq, adapter);
+ adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
break;
case ERROR_INDICATION:
netdev_dbg(netdev, "Got Error Indication\n");
@@ -4335,7 +4353,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
- IBMVNIC_MAX_TX_QUEUES);
+ IBMVNIC_MAX_QUEUES);
if (!netdev)
return -ENOMEM;
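The MAC-change path above becomes synchronous by pairing a completion with a stashed return code: the caller primes adapter->fw_done, sends the CRQ, sleeps in wait_for_completion(), and the response handler stores its result before calling complete(). A minimal sketch of the pattern, assuming a hypothetical request structure rather than the real ibmvnic adapter:

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct fw_request {
            struct completion done;
            int rc;                          /* filled in by the response handler */
    };

    static int send_and_wait(struct fw_request *req)
    {
            init_completion(&req->done);
            /* ... queue the command toward firmware here ... */
            wait_for_completion(&req->done); /* handler calls complete(&req->done) */
            return req->rc ? -EIO : 0;
    }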
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 3aec42118db2..fe21a6e2ddae 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,7 @@
#define IBMVNIC_RX_WEIGHT 16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_TX_QUEUES 5
+#define IBMVNIC_MAX_QUEUES 10
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3b3983a1ffbb..dc71e87c3260 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
p = (char *)adapter + stat->stat_offset;
break;
default:
- WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
- stat->type, i);
+ netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n",
+ stat->type, i);
continue;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9f18d39bdc8f..1298b69f990b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3303,9 +3303,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
- ew32(RXDCTL(0), rxdctl | 0x3);
+ ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8));
}
+ dev_info(&adapter->pdev->dev,
+ "Some CPU C-states have been disabled in order to enable jumbo frames\n");
pm_qos_update_request(&adapter->pm_qos_req, lat);
} else {
pm_qos_update_request(&adapter->pm_qos_req,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index ea3ab24265ee..760cfa52d02c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -353,7 +353,7 @@ int fm10k_iov_resume(struct pci_dev *pdev)
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
/* allocate all but the last GLORT to the VFs */
- if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
+ if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
break;
/* assign GLORT to VF, and restrict it to multicast */
@@ -511,7 +511,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return err;
/* allocate VFs if not already allocated */
- if (num_vfs && (num_vfs != current_vfs)) {
+ if (num_vfs && num_vfs != current_vfs) {
/* Disable completer abort error reporting as
* the VFs can trigger this any time they read a queue
* that they don't own.
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 538b42d5c187..8e12aae065d8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -446,13 +446,13 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
skb->protocol = eth_type_trans(skb, dev);
+ /* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
- return;
-
- /* update MACVLAN statistics */
- macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
- !!(rx_desc->w.hdr_info &
- cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
/**
@@ -479,8 +479,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
- skb_record_rx_queue(skb, rx_ring->queue_index);
-
FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
if (rx_desc->w.vlan) {
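fm10k_type_trans() can key the macvlan statistics off skb->pkt_type because eth_type_trans() has already run and classified the destination address, so re-reading the Rx descriptor's XC field is unnecessary. Simplified from eth_type_trans() in net/ethernet/eth.c:

    /* simplified: how eth_type_trans() derives skb->pkt_type */
    if (is_multicast_ether_addr(eth->h_dest)) {
            if (ether_addr_equal(eth->h_dest, dev->broadcast))
                    skb->pkt_type = PACKET_BROADCAST;
            else
                    skb->pkt_type = PACKET_MULTICAST;
    }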
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index adc62fb38c49..a38ae5c54da3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -934,8 +934,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (vid >= VLAN_N_VID)
return -EINVAL;
- /* Verify we have permission to add VLANs */
- if (hw->mac.vlan_override)
+ /* Verify that we have permission to add VLANs. If this is a request
+ * to remove a VLAN, we still want to allow the user to remove the
+ * VLAN device. In that case, we need to clear the bit in the
+ * active_vlans bitmask.
+ */
+ if (set && hw->mac.vlan_override)
return -EACCES;
/* update active_vlans bitmask */
@@ -954,6 +958,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
rx_ring->vid &= ~FM10K_VLAN_CLEAR;
}
+ /* If our VLAN has been overridden, there is no reason to send VLAN
+ * removal requests as they will be silently ignored.
+ */
+ if (hw->mac.vlan_override)
+ return 0;
+
/* Do not remove default VLAN ID related entries from VLAN and MAC
* tables
*/
@@ -1040,14 +1050,13 @@ static int __fm10k_uc_sync(struct net_device *dev,
const unsigned char *addr, bool sync)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
s32 err;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = fm10k_queue_mac_request(interface, glort,
@@ -1106,14 +1115,13 @@ static int __fm10k_mc_sync(struct net_device *dev,
const unsigned char *addr, bool sync)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
s32 err;
if (!is_multicast_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = fm10k_queue_mac_request(interface, glort,
@@ -1157,10 +1165,12 @@ static void fm10k_set_rx_mode(struct net_device *dev)
/* update xcast mode first, but only if it changed */
if (interface->xcast_mode != xcast_mode) {
- /* update VLAN table */
+ /* update VLAN table when entering promiscuous mode */
if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
0, true);
+
+ /* clear VLAN table when exiting promiscuous mode */
if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_clear_unused_vlans(interface);
@@ -1182,9 +1192,10 @@ static void fm10k_set_rx_mode(struct net_device *dev)
void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
+ struct fm10k_l2_accel *l2_accel = interface->l2_accel;
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
- int xcast_mode;
+ int xcast_mode, i;
u16 vid, glort;
/* record glort for this interface */
@@ -1211,11 +1222,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
xcast_mode == FM10K_XCAST_MODE_PROMISC);
- /* Add filter for VLAN 0 */
- fm10k_queue_vlan_request(interface, 0, 0, true);
-
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
fm10k_queue_vlan_request(interface, vid, 0, true);
@@ -1234,6 +1242,24 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
+ /* synchronize macvlan addresses */
+ if (l2_accel) {
+ for (i = 0; i < l2_accel->size; i++) {
+ struct net_device *sdev = l2_accel->macvlan[i];
+
+ if (!sdev)
+ continue;
+
+ glort = l2_accel->dglort + 1 + i;
+
+ hw->mac.ops.update_xcast_mode(hw, glort,
+ FM10K_XCAST_MODE_MULTI);
+ fm10k_queue_mac_request(interface, glort,
+ sdev->dev_addr,
+ hw->mac.default_vid, true);
+ }
+ }
+
fm10k_mbx_unlock(interface);
/* record updated xcast mode state */
@@ -1490,7 +1516,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_MULTI);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- 0, true);
+ hw->mac.default_vid, true);
}
fm10k_mbx_unlock(interface);
@@ -1530,7 +1556,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- 0, false);
+ hw->mac.default_vid, false);
}
fm10k_mbx_unlock(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 425d814aed4d..d6406fc31ffb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -866,7 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
* used here to indicate to the VF that it will not have privilege to
* write VLAN_TABLE. All policy is enforced on the PF but this allows
- * the VF to correctly report errors to userspace rqeuests.
+ * the VF to correctly report errors to userspace requests.
*/
if (vf_info->pf_vid)
vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index e019baa905c5..46e9f4e0a02c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -508,39 +508,40 @@ struct i40e_pf {
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
- u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40E_FLAG_MSI_ENABLED BIT(1)
-#define I40E_FLAG_MSIX_ENABLED BIT(2)
-#define I40E_FLAG_RSS_ENABLED BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED BIT(4)
-#define I40E_FLAG_FILTER_SYNC BIT(5)
-#define I40E_FLAG_SRIOV_ENABLED BIT(6)
-#define I40E_FLAG_DCB_CAPABLE BIT(7)
-#define I40E_FLAG_DCB_ENABLED BIT(8)
-#define I40E_FLAG_FD_SB_ENABLED BIT(9)
-#define I40E_FLAG_FD_ATR_ENABLED BIT(10)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12)
-#define I40E_FLAG_MFP_ENABLED BIT(13)
-#define I40E_FLAG_UDP_FILTER_SYNC BIT(14)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT(16)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT(17)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19)
-#define I40E_FLAG_TEMP_LINK_POLLING BIT(20)
-#define I40E_FLAG_LEGACY_RX BIT(21)
-#define I40E_FLAG_PTP BIT(22)
-#define I40E_FLAG_IWARP_ENABLED BIT(23)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24)
-#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25)
-#define I40E_FLAG_CLIENT_RESET BIT(26)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
-#define I40E_FLAG_TC_MQPRIO BIT(29)
-#define I40E_FLAG_FD_SB_INACTIVE BIT(30)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31)
+ u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0)
+#define I40E_FLAG_MSI_ENABLED BIT_ULL(1)
+#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2)
+#define I40E_FLAG_RSS_ENABLED BIT_ULL(3)
+#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4)
+#define I40E_FLAG_FILTER_SYNC BIT_ULL(5)
+#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6)
+#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7)
+#define I40E_FLAG_DCB_ENABLED BIT_ULL(8)
+#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9)
+#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12)
+#define I40E_FLAG_MFP_ENABLED BIT_ULL(13)
+#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20)
+#define I40E_FLAG_LEGACY_RX BIT_ULL(21)
+#define I40E_FLAG_PTP BIT_ULL(22)
+#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25)
+#define I40E_FLAG_CLIENT_RESET BIT_ULL(26)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28)
+#define I40E_FLAG_TC_MQPRIO BIT_ULL(29)
+#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31)
+#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
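The flags word grows past 32 bits with I40E_FLAG_DISABLE_FW_LLDP at bit 32, which is why every definition switches from BIT() to BIT_ULL(): BIT() is an unsigned long shift, and unsigned long is only 32 bits on 32-bit kernels. A sketch of the hazard, with the macros simplified from <linux/bits.h>:

    #define BIT(nr)         (1UL << (nr))   /* width of unsigned long */
    #define BIT_ULL(nr)     (1ULL << (nr))  /* always 64 bits wide */

    u64 flags = BIT_ULL(32);                /* well-defined everywhere */
    /* BIT(32) is undefined behaviour where unsigned long is 32 bits */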
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9af74253c3f7..e78971605e0b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -907,10 +907,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -971,7 +976,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1027,7 +1032,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b0188b8f91ba..a852775d3059 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -1594,7 +1595,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1605,7 +1606,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1614,8 +1615,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
@@ -2231,8 +2232,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2492,6 +2497,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
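The new preservation defines describe a two-bit field at bits 2:1 of command_flags (shift 1, pre-shift mask 0x03), with 0x01 meaning preserve all and 0x03 meaning preserve selected fields. Packing a mode into the command then reduces to the shift used later in i40e_common.c:

    /* illustrative: select "preserve all" in the NVM update command */
    cmd->command_flags |= I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
                          I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT;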
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 1b1e2acbd07f..0de9610c1d8d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -378,11 +378,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
if (!client || !cdev)
return;
- /* Here we handle client opens. If the client is down, but
- * the netdev is up, then open the client.
+ /* Here we handle client opens. If the client is down, and
+ * the netdev is registered, then open the client.
*/
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ if (vsi->netdev_registered &&
client->ops && client->ops->open) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
ret = client->ops->open(&cdev->lan_info, client);
@@ -393,17 +393,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
i40e_client_del_instance(pf);
}
}
- } else {
- /* Likewise for client close. If the client is up, but the netdev
- * is down, then close the client.
- */
- if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
- client->ops && client->ops->close) {
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
- client->ops->close(&cdev->lan_info, client, false);
- i40e_client_release_qvlist(&cdev->lan_info);
- }
}
+
+	/* Enable/disable PE TCP_ENA flag based on netdev down/up */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+ else
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
}
/**
@@ -717,13 +719,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
return -ENOENT;
}
- if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
- (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
- } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
- !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
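i40e_client_update_vsi_ctxt() takes two bitmasks: valid_flag selects which bits the call acts on, and flag carries the desired values for those bits, so one helper can both set and clear TCP_ENA. Going by the call sites above (the two zero arguments appear to be the VF selector, unused for the PF's own VSI):

    /* netdev down: clear TCP_ENA */
    i40e_client_update_vsi_ctxt(&cdev->lan_info, client, 0, 0,
                                0, I40E_CLIENT_VSI_FLAG_TCP_ENABLE);

    /* netdev up: set TCP_ENA */
    i40e_client_update_vsi_ctxt(&cdev->lan_info, client, 0, 0,
                                I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
                                I40E_CLIENT_VSI_FLAG_TCP_ENABLE);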
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 15b21a5315b5..ba55c889e4c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -132,7 +132,7 @@ struct i40e_info {
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
-#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
struct i40e_ops {
/* setup_q_vector_list enables queues with a particular vector */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 095965f268bd..ef5a868aae46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -278,6 +278,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1486,6 +1488,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1534,6 +1537,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1544,9 +1548,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
- if (mode == I40E_LINK_ACTIVITY)
- blink = false;
-
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@@ -3465,13 +3466,14 @@ exit:
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3490,6 +3492,16 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
cmd->module_pointer = module_pointer;
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
@@ -3629,7 +3641,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ * @dcb_enable: True if DCB configuration needs to be applied
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -5236,7 +5275,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -5246,7 +5285,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -5255,7 +5294,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -5271,7 +5310,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -5282,14 +5321,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -5364,11 +5403,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -5378,7 +5412,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -5397,7 +5431,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -5439,10 +5473,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
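i40e_aq_set_dcb_parameters() builds a direct (16-byte) command; with dcb_enable set it marks the DCB field valid and asks the firmware agent to apply a default DCB configuration on link-up, while with it clear the zeroed command leaves the agent alone. The probe and rebuild hunks later in this patch call it as:

    /* let FW write a default DCB config when the link comes up */
    i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);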
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5f6cf7212d4f..2f5bee713fef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
+ I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
+ I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
+ I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
I40E_PF_STAT("rx_size_64", stats.rx_size_64),
I40E_PF_STAT("rx_size_127", stats.rx_size_127),
I40E_PF_STAT("rx_size_255", stats.rx_size_255),
@@ -229,6 +233,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1585,6 +1590,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
+ /* Clear cloned XDP RX-queue info before setup call */
+ memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
@@ -2299,6 +2306,8 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
{
+ struct i40e_ring *rx_ring = vsi->rx_rings[queue];
+ struct i40e_ring *tx_ring = vsi->tx_rings[queue];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_q_vector *q_vector;
@@ -2306,26 +2315,26 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
- vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
- vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
+ rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+ tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
if (ec->use_adaptive_rx_coalesce)
- vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
else
- vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
- vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
else
- vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
- q_vector = vsi->rx_rings[queue]->q_vector;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
- q_vector = vsi->tx_rings[queue]->q_vector;
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
@@ -2740,16 +2749,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
no_input_set:
if (input_set & I40E_L3_SRC_MASK)
- fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
+ fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
if (input_set & I40E_L3_DST_MASK)
- fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
+ fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
if (input_set & I40E_L4_SRC_MASK)
- fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
+ fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
if (input_set & I40E_L4_DST_MASK)
- fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
+ fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
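The mask fix above is a width/byte-order pairing: in struct ethtool_tcp_ip4_spec the ip4src/ip4dst fields are __be32 while psrc/pdst are __be16, and the old code paired htonl() with a 16-bit mask and htons() with a 32-bit one. Stated as a rule:

    __be32 ip_mask   = htonl(0xFFFFFFFF);   /* 32-bit field, 32-bit mask */
    __be16 port_mask = htons(0xFFFF);       /* 16-bit field, 16-bit mask */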
@@ -3800,6 +3809,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
i40e_write_fd_input_set(pf, index, new_mask);
+ /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
+ * frames. If we're programming the input set for IPv4/Other, we also
+ * need to program the IPv4/Fragmented input set. Since we don't have
+ * separate support, we'll always assume and enforce that the two flow
+ * types must have matching input sets.
+ */
+ if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ new_mask);
+
/* Add the new offset and update table, if necessary */
if (new_flex_offset) {
err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
@@ -3822,6 +3841,87 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
}
/**
+ * i40e_match_fdir_filter - Return true if two filters match
+ * @a: pointer to filter struct
+ * @b: pointer to filter struct
+ *
+ * Returns true if the two filters match exactly the same criteria, i.e. they
+ * match the same flow type and have the same parameters. We don't need to
+ * check the input set since all filters of the same flow type must use the
+ * same input set.
+ **/
+static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
+ struct i40e_fdir_filter *b)
+{
+	/* The filters do not match if any of these criteria differ. */
+ if (a->dst_ip != b->dst_ip ||
+ a->src_ip != b->src_ip ||
+ a->dst_port != b->dst_port ||
+ a->src_port != b->src_port ||
+ a->flow_type != b->flow_type ||
+ a->ip4_proto != b->ip4_proto)
+ return false;
+
+ return true;
+}
+
+/**
+ * i40e_disallow_matching_filters - Check that new filters differ
+ * @vsi: pointer to the targeted VSI
+ * @input: new filter to check
+ *
+ * Due to hardware limitations, it is not possible for two filters that match
+ * similar criteria to be programmed at the same time. This is true for a few
+ * reasons:
+ *
+ * (a) all filters matching a particular flow type must use the same input
+ * set, that is they must match the same criteria.
+ * (b) different flow types will never match the same packet, as the flow type
+ * is decided by hardware before checking which rules apply.
+ * (c) hardware has no way to distinguish which order filters apply in.
+ *
+ * Due to this, we can't really support using the location data to order
+ * filters in the hardware parsing. It is technically possible for the user to
+ * request two filters matching the same criteria but which select different
+ * queues. In this case, rather than keep both filters in the list, we reject
+ * the 2nd filter when the user requests adding it.
+ *
+ * This avoids needing to track location for programming the filter to
+ * hardware, and ensures that we avoid some strange scenarios involving
+ * deleting filters which match the same criteria.
+ **/
+static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+
+ /* Loop through every filter, and check that it doesn't match */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* Don't check the filters match if they share the same fd_id,
+ * since the new filter is actually just updating the target
+ * of the old filter.
+ */
+ if (rule->fd_id == input->fd_id)
+ continue;
+
+ /* If any filters match, then print a warning message to the
+ * kernel message buffer and bail out.
+ */
+ if (i40e_match_fdir_filter(rule, input)) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d already matches this flow.\n",
+ rule->fd_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
* i40e_add_fdir_ethtool - Add/Remove Flow Director filters
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
@@ -3933,19 +4033,25 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->flex_offset = userdef.flex_offset;
}
- ret = i40e_add_del_fdir(vsi, input, true);
+ /* Avoid programming two filters with identical match criteria. */
+ ret = i40e_disallow_matching_filters(vsi, input);
if (ret)
- goto free_input;
+ goto free_filter_memory;
/* Add the input filter to the fdir_input_list, possibly replacing
* a previous filter. Do not free the input structure after adding it
* to the list as this would cause a use-after-free bug.
*/
i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
-
+ ret = i40e_add_del_fdir(vsi, input, true);
+ if (ret)
+ goto remove_sw_rule;
return 0;
-free_input:
+remove_sw_rule:
+ hlist_del(&input->fdir_node);
+ pf->fdir_pf_active_filters--;
+free_filter_memory:
kfree(input);
return ret;
}
@@ -4258,7 +4364,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 orig_flags, new_flags, changed_flags;
+ u64 orig_flags, new_flags, changed_flags;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -4309,13 +4415,32 @@ flags_complete:
!(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
return -EOPNOTSUPP;
+	/* Disabling FW LLDP is not supported if NPAR is active or if the
+	 * FW API version is < 1.7
+ */
+ if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (pf->hw.func_caps.npar_enable) {
+ dev_warn(&pf->pdev->dev,
+ "Unable to stop FW LLDP if NPAR active\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pf->hw.aq.api_maj_ver < 1 ||
+ (pf->hw.aq.api_maj_ver == 1 &&
+ pf->hw.aq.api_min_ver < 7)) {
+ dev_warn(&pf->pdev->dev,
+ "FW ver does not support stopping FW LLDP\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
/* Compare and exchange the new flags into place. If we failed, that
* is if cmpxchg returns anything but the old value, this means that
* something else has modified the flags variable since we copied it
* originally. We'll just punt with an error and log something in the
* message buffer.
*/
- if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
+ if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&pf->pdev->dev,
"Unable to update pf->flags as it was modified by another thread...\n");
return -EAGAIN;
@@ -4354,12 +4479,37 @@ flags_complete:
}
}
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ struct i40e_dcbx_config *dcbcfg;
+ int i;
+
+ i40e_aq_stop_lldp(&pf->hw, true, NULL);
+ i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
+ /* reset local_dcbx_config to default */
+ dcbcfg = &pf->hw.local_dcbx_config;
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.maxtcs = 0;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = 0;
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcbcfg->etscfg.prioritytable[i] = 0;
+ dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ } else {
+ i40e_aq_start_lldp(&pf->hw, NULL);
+ }
+ }
+
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
I40E_FLAG_LEGACY_RX |
- I40E_FLAG_SOURCE_PRUNING_DISABLED))
+ I40E_FLAG_SOURCE_PRUNING_DISABLED |
+ I40E_FLAG_DISABLE_FW_LLDP))
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
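The private-flags path updates pf->flags without a lock: it snapshots the word, computes new_flags, and publishes with a compare-and-exchange that fails if another thread modified the flags in between. Widening flags to u64 is what turns cmpxchg() into cmpxchg64(). A minimal sketch of the pattern:

    u64 old, new;

    old = READ_ONCE(pf->flags);
    new = old | I40E_FLAG_LEGACY_RX;        /* example modification */
    if (cmpxchg64(&pf->flags, old, new) != old)
            return -EAGAIN;                 /* lost the race; caller may retry */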
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index af792112a2d3..f95ce9b5e4fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -47,8 +47,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1818,6 +1818,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
+	/* Do not use more TC queue pairs than there are MSI-X vectors */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
+
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
@@ -4122,6 +4126,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
q_vector->num_ringpairs = num_ringpairs;
+ q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
q_vector->rx.count = 0;
q_vector->tx.count = 0;
@@ -4877,104 +4882,6 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
#endif
/**
- * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
- * @q_idx: TX queue number
- * @vsi: Pointer to VSI struct
- *
- * This function checks specified queue for given VSI. Detects hung condition.
- * We proactively detect hung TX queues by checking if interrupts are disabled
- * but there are pending descriptors. If it appears hung, attempt to recover
- * by triggering a SW interrupt.
- **/
-static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
-{
- struct i40e_ring *tx_ring = NULL;
- struct i40e_pf *pf;
- u32 val, tx_pending;
- int i;
-
- pf = vsi->back;
-
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (q_idx == vsi->tx_rings[i]->queue_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
- }
- }
-
- if (!tx_ring)
- return;
-
- /* Read interrupt register */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- val = rd32(&pf->hw,
- I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
- tx_ring->vsi->base_vector - 1));
- else
- val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
-
- tx_pending = i40e_get_tx_pending(tx_ring);
-
- /* Interrupts are disabled and TX pending is non-zero,
- * trigger the SW interrupt (don't wait). Worst case
- * there will be one extra interrupt which may result
- * into not cleaning any queues because queues are cleaned.
- */
- if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
- i40e_force_wb(vsi, tx_ring->q_vector);
-}
-
-/**
- * i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @pf: pointer to PF struct
- *
- * LAN VSI has netdev and netdev has TX queues. This function is to check
- * each of those TX queues if they are hung, trigger recovery by issuing
- * SW interrupt.
- **/
-static void i40e_detect_recover_hung(struct i40e_pf *pf)
-{
- struct net_device *netdev;
- struct i40e_vsi *vsi;
- unsigned int i;
-
- /* Only for LAN VSI */
- vsi = pf->vsi[pf->lan_vsi];
-
- if (!vsi)
- return;
-
- /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
- if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
- return;
-
- /* Make sure type is MAIN VSI */
- if (vsi->type != I40E_VSI_MAIN)
- return;
-
- netdev = vsi->netdev;
- if (!netdev)
- return;
-
- /* Bail out if netif_carrier is not OK */
- if (!netif_carrier_ok(netdev))
- return;
-
- /* Go thru' TX queues for netdev */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
-
- q = netdev_get_tx_queue(netdev, i);
- if (q)
- i40e_detect_recover_hung_queue(i, vsi);
- }
-}
-
-/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
* @pf: pointer to PF
*
@@ -5342,6 +5249,8 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
int ret = 0;
int i;
@@ -5359,10 +5268,40 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+
+ dev_info(&pf->pdev->dev,
"Failed configuring TC map %d for VSI %d\n",
enabled_tc, vsi->seid);
- goto out;
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
+ &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed querying vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+ if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
+ u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
+
+ if (!valid_tc)
+ valid_tc = bw_config.tc_valid_bits;
+ /* Always enable TC0, no matter what */
+ valid_tc |= 1;
+ dev_info(&pf->pdev->dev,
+ "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
+ enabled_tc, bw_config.tc_valid_bits, valid_tc);
+ enabled_tc = valid_tc;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Unable to configure TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ goto out;
+ }
}
/* Update Queue Pairs Mapping for currently enabled UPs */
@@ -5402,13 +5341,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update the VSI after updating the VSI queue-mapping
* information
*/
- ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -5418,11 +5356,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update current VSI BW information */
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -6388,8 +6325,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
- /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
- if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
+	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
+	 * Also do not enable DCBx if the FW LLDP agent is disabled.
+ */
+ if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
goto out;
/* Get the initial DCB configuration */
@@ -6416,6 +6356,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
+ } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
+ pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %s aq_err %s\n",
@@ -7505,11 +7448,6 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
{
struct i40e_vsi *vsi = np->vsi;
- if (!tc_can_offload(vsi->netdev))
- return -EOPNOTSUPP;
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return i40e_configure_clsflower(vsi, cls_flower);
@@ -7527,6 +7465,9 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct i40e_netdev_priv *np = cb_priv;
+ if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return i40e_setup_tc_cls_flower(np, type_data);
@@ -7744,6 +7685,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
/* Reprogram the default input set for Other/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
@@ -9077,6 +9021,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
vsi->uplink_seid);
return ret;
}
+	/* Reconfigure TX queues using the QTX_CTL register */
+ ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "failed to configure TX rings for channel %u\n",
+ ch->seid);
+ return ret;
+ }
+ /* update 'next_base_queue' */
+ vsi->next_base_queue = vsi->next_base_queue +
+ ch->num_queue_pairs;
if (ch->max_tx_rate) {
u64 credits = ch->max_tx_rate;
@@ -9280,6 +9235,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
+ /* Enable FW to write a default DCB config on link-up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
#ifdef CONFIG_I40E_DCB
ret = i40e_init_pf_dcb(pf);
if (ret) {
@@ -9697,7 +9655,7 @@ static void i40e_service_task(struct work_struct *work)
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
- i40e_detect_recover_hung(pf);
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@@ -10464,10 +10422,9 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
/* set up vector assignment tracking */
size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
pf->irq_pile = kzalloc(size, GFP_KERNEL);
- if (!pf->irq_pile) {
- dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+ if (!pf->irq_pile)
return -ENOMEM;
- }
+
pf->irq_pile->num_entries = vectors;
pf->irq_pile->search_hint = 0;
@@ -10785,8 +10742,13 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* Determine the RSS size of the VSI */
if (!vsi->rss_size) {
u16 qcount;
-
- qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ /* If the firmware does something weird during VSI init, we
+ * could end up with zero TCs. Check for that to avoid
+ * divide-by-zero. It probably won't pass traffic, but it also
+ * won't panic.
+ */
+ qcount = vsi->num_queue_pairs /
+ (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
}
if (!vsi->rss_size)
@@ -10974,7 +10936,7 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
ret = i40e_aq_update_nvm(&pf->hw,
I40E_SR_NVM_CONTROL_WORD,
0x10, sizeof(nvm_word),
- &nvm_word, true, NULL);
+ &nvm_word, true, 0, NULL);
/* Save off last admin queue command status before releasing
* the NVM
*/
@@ -11115,13 +11077,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.aq.fw_maj_ver >= 6)
pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
- if (pf->hw.func_caps.vmdq) {
+ if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
- if (pf->hw.func_caps.iwarp) {
+ if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
pf->flags |= I40E_FLAG_IWARP_ENABLED;
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
@@ -13598,6 +13560,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+
+ /* Enable FW to write default DCB config on link-up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
#ifdef CONFIG_I40E_DCB
err = i40e_init_pf_dcb(pf);
if (err) {
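Two of the i40e_main.c hunks above harden queue math against degenerate state: the queue-map path clamps TC queue pairs to the number of LAN MSI-X vectors, and the RSS path tolerates a zero TC count reported by firmware. The RSS guard reduces to:

    /* never divide by zero, even if FW reports numtc == 0 */
    u16 numtc = vsi->tc_config.numtc ? vsi->tc_config.numtc : 1;
    u16 qcount = vsi->num_queue_pairs / numtc;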
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7689c2ee0d46..76a5cb04e4fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -239,8 +239,9 @@ read_nvm_exit:
*
 * Reads a buffer of 16-bit words from the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
bool last_command)
{
i40e_status ret_code = I40E_ERR_NVM;
@@ -389,7 +390,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
i40e_status ret_code;
- u16 read_size = *words;
+ u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
u16 i = 0;
@@ -496,7 +497,8 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, &cmd_details);
+ data, last_command, 0,
+ &cmd_details);
return ret_code;
}
@@ -677,6 +679,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -686,6 +691,12 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@@ -703,6 +714,7 @@ static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@@ -798,9 +810,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
- i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ i40e_nvmupd_clear_wait_state(hw);
status = 0;
- goto exit;
+ break;
}
status = I40E_ERR_NOT_READY;
@@ -815,7 +827,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
-exit:
+
mutex_unlock(&hw->aq.arq_mutex);
return status;
}
@@ -944,6 +956,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@@ -1118,38 +1134,53 @@ retry:
}
/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
* @hw: pointer to the hardware structure
- * @opcode: the event that just happened
**/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- if (opcode == hw->nvm_wait_opcode) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- default:
- break;
- }
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor of the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
}
}
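The split above separates the state transition (clear_wait_state) from the event match: on a match, the completed descriptor is now snapshotted into hw->nvm_aq_event_desc so that a later I40E_NVMUPD_GET_AQ_EVENT request can return it. A rough sketch of the expected caller, presumably the AdminQ receive path in i40e_main.c; that call site is not part of these hunks, so treat the details as an assumption:

/* Assumed call site (i40e_main.c AdminQ clean task, not in these hunks):
 * after i40e_clean_arq_element() fills 'event', hand the completed
 * descriptor to the NVM update state machine so GET_AQ_EVENT can later
 * return it.
 */
static void nvmupd_forward_event(struct i40e_pf *pf,
				 struct i40e_arq_event_info *event)
{
	u16 opcode = le16_to_cpu(event->desc.opcode);

	i40e_nvmupd_check_wait_event(&pf->hw, opcode, &event->desc);
}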
@@ -1205,6 +1236,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
break;
@@ -1267,6 +1301,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -1302,6 +1339,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@@ -1311,6 +1351,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
}
/* should we wait for a followup event? */
@@ -1392,6 +1433,40 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
}
/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
+/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1486,18 +1561,20 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
- &cmd_details);
+ preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3bb6659db822..83798b7841b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -214,7 +214,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@@ -225,6 +225,10 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@@ -333,7 +337,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -343,6 +349,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Converts a link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If the link speed cannot be represented,
+ * reports VIRTCHNL_LINK_SPEED_UNKNOWN.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
@@ -400,13 +437,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5f9cac55aa55..afb72e711d43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 5bc2748ac468..e554aa6cf070 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
+#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
@@ -725,6 +726,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
return 0;
}
+/**
+ * i40e_detect_recover_hung - detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * The VSI's netdev owns the TX queues. This function checks each of those
+ * TX queues and, if a queue appears hung, triggers recovery by issuing a
+ * SW interrupt.
+ **/
+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct net_device *netdev;
+ unsigned int i;
+ int packets;
+
+ if (!vsi)
+ return;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ tx_ring = vsi->tx_rings[i];
+ if (tx_ring && tx_ring->desc) {
+ /* If packet counter has not changed the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt_ctr would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.packets & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ i40e_force_wb(vsi, tx_ring->q_vector);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to i40e_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt_ctr =
+ i40e_get_tx_pending(tx_ring) ? packets : -1;
+ }
+ }
+}
+
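The check above amounts to comparing successive snapshots of the completed-packet counter. A minimal self-contained model of the decision (single queue; the smp_rmb() pairing and the actual SW-interrupt kick are omitted):

#include <limits.h>
#include <stdbool.h>

struct txq_model {
	unsigned long long packets; /* completed-packet counter, monotonic */
	int prev_pkt_ctr;           /* last snapshot, -1 means "was idle" */
};

/* Returns true when the queue made no progress since the last pass
 * while work was pending, i.e. the caller should fire a SW interrupt.
 */
static bool txq_check_hung(struct txq_model *q, bool work_pending)
{
	int packets = (int)(q->packets & INT_MAX);

	if (q->prev_pkt_ctr == packets)
		return true;

	q->prev_pkt_ctr = work_pending ? packets : -1;
	return false;
}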
#define WB_STRIDE 4
/**
@@ -902,7 +956,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
val);
} else {
val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
@@ -929,8 +983,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */
wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx +
- vsi->base_vector - 1), val);
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
} else {
u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
@@ -1162,6 +1215,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:
@@ -1236,6 +1290,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
i40e_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == I40E_VSI_MAIN)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
@@ -1256,6 +1312,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ int err = -ENOMEM;
int bi_size;
/* warn if we are about to overwrite the pointer */
@@ -1283,13 +1340,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index);
+ if (err < 0)
+ goto err;
+ }
+
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
return 0;
err:
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
- return -ENOMEM;
+ return err;
}
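These hunks pair XDP RX-queue info registration at ring setup with unregistration at teardown, and only for rings on the main VSI. A condensed sketch of the pairing, simplified from the hunks above with error paths trimmed:

/* Sketch: register/unregister pairing for an XDP-capable RX ring,
 * condensed from the setup/teardown hunks above.
 */
static int ring_setup_xdp(struct i40e_ring *rx_ring)
{
	/* XDP RX-queue info is only needed for rings exposed to XDP */
	if (rx_ring->vsi->type != I40E_VSI_MAIN)
		return 0;
	return xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				rx_ring->queue_index);
}

static void ring_teardown_xdp(struct i40e_ring *rx_ring)
{
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}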
/**
@@ -2068,11 +2133,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false, xdp_xmit = false;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
- struct xdp_buff xdp;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
@@ -2243,7 +2310,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
- int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
@@ -2253,8 +2319,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
return;
}
- vector = (q_vector->v_idx + vsi->base_vector);
-
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@@ -2303,12 +2367,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(vector - 1), rxval);
+ wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(vector - 1), txval);
+ wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
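These hunks replace the per-write arithmetic (q_vector->v_idx + vsi->base_vector - 1) with a reg_idx field computed once at vector setup. The VF side sets reg_idx = q_idx directly (see the i40evf_main.c hunk below); a sketch of the assumed PF-side initialization, preserving the formula the hot path used to compute:

/* Assumed PF-side setup: precompute the DYN_CTLN register index once
 * so interrupt-path writes can use q_vector->reg_idx directly.
 */
q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;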
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index fbae1182e2ea..701b708628b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,6 +27,8 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <net/xdp.h>
+
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
@@ -277,7 +279,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
}
/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
@@ -331,6 +333,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ int prev_pkt_ctr;
};
struct i40e_rx_queue_stats {
@@ -428,6 +431,7 @@ struct i40e_ring {
*/
struct i40e_channel *ch;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -498,6 +502,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring);
+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0e8568719b4e..cd294e6a8587 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -402,6 +402,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -421,15 +422,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -611,6 +618,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -1502,19 +1510,19 @@ struct i40e_lldp_variables {
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1526,16 +1534,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1545,8 +1553,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1573,11 +1581,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 36cb8e068e85..e9309fb9084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed =
- (enum virtchnl_link_speed)ls->link_speed;
+ i40e_virtchnl_link_speed(ls->link_speed);
}
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
@@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg);
+ i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 8b0d4b255dea..d1aab6b8bfb1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -837,10 +837,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -901,7 +906,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 06b04572c518..815de8d9c3fb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -1562,7 +1563,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1573,7 +1574,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1582,8 +1583,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
@@ -2196,8 +2197,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
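Note that the AQ wire encoding of the preservation flags differs from the generic I40E_NVM_PRESERVATION_FLAGS_* values carried in the nvmupd config word (SELECTED is 0x01 generically but 0x03 on the wire; ALL is 0x02 versus 0x01). The translation presumably happens inside i40e_aq_update_nvm(); a sketch of that mapping, treated as an assumption since the function body is not part of these hunks:

/* Assumed mapping inside i40e_aq_update_nvm(): 'preservation_flags' is
 * the generic value extracted from the config word; 'cmd' is the
 * struct i40e_aqc_nvm_update being built for the wire.
 */
if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
	cmd->command_flags |= I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
			      I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT;
else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
	cmd->command_flags |= I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
			      I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT;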
@@ -2457,6 +2462,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
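The new direct command (opcode 0x0303) is driven from the PF via i40e_aq_set_dcb_parameters(), prototyped in i40e_prototype.h above and called from i40e_probe(). Its body is not part of these hunks; a plausible sketch, with the implementation details treated as assumptions:

i40e_status
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_dcb_parameters *cmd =
		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_dcb_parameters);

	if (dcb_enable) {
		cmd->valid_flags = I40E_DCB_VALID;
		cmd->command = I40E_AQ_DCB_SET_AGENT;
	}

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}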
/* Apply MIB changes (0x0A07)
* uses the generic struct as it contains no data
*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7d70bf69b249..67bf5cebb76f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -284,6 +284,8 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1202,7 +1204,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
}
/**
- * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -1212,7 +1214,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -1221,7 +1223,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
@@ -1237,7 +1239,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -1248,16 +1250,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
@@ -1330,11 +1332,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -1344,7 +1341,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -1363,7 +1360,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -1405,10 +1402,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index b624b5994075..47c429931a57 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40evf_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7fa7a41915c1..5b222246e08b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 1ba29bb85b67..357d6051281f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -148,6 +148,59 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
+/**
+ * i40evf_detect_recover_hung - detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * The VSI's netdev owns the TX queues. This function checks each of those
+ * TX queues and, if a queue appears hung, triggers recovery by issuing a
+ * SW interrupt.
+ **/
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct net_device *netdev;
+ unsigned int i;
+ int packets;
+
+ if (!vsi)
+ return;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ for (i = 0; i < vsi->back->num_active_queues; i++) {
+ tx_ring = &vsi->back->tx_rings[i];
+ if (tx_ring && tx_ring->desc) {
+ /* If packet counter has not changed the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt_ctr would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.packets & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ i40evf_force_wb(vsi, tx_ring->q_vector);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to i40evf_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt_ctr =
+ i40evf_get_tx_pending(tx_ring, false) ? packets : -1;
+ }
+ }
+}
+
#define WB_STRIDE 4
/**
@@ -316,8 +369,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
- vsi->base_vector - 1), val);
+ I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
q_vector->arm_wb_state = true;
}
@@ -336,7 +388,7 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
+ I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
val);
}
@@ -469,6 +521,7 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:
@@ -1444,12 +1497,9 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
- int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
- vector = (q_vector->v_idx + vsi->base_vector);
-
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@@ -1498,12 +1548,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(vector - 1), rxval);
+ wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(vector - 1), txval);
+ wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8d26c85d12e1..7798a6645c3f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -260,7 +260,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
}
/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
@@ -313,6 +313,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@@ -467,6 +468,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 213b773dfad6..54951c84a481 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -361,6 +361,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -380,15 +381,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -561,6 +568,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -1422,19 +1430,19 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1446,16 +1454,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1465,8 +1473,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1493,11 +1501,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index de0af521d602..9690c1ea019e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -114,14 +114,14 @@ struct i40e_q_vector {
struct i40evf_adapter *adapter;
struct i40e_vsi *vsi;
struct napi_struct napi;
- unsigned long reg_idx;
struct i40e_ring_container rx;
struct i40e_ring_container tx;
u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 or 1 update ITR */
- int v_idx; /* vector index in list */
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
cpumask_t affinity_mask;
@@ -187,6 +187,7 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
__I40EVF_IN_CLIENT_TASK,
+ __I40EVF_IN_REMOVE_TASK, /* device being removed */
};
/* board specific private data structure */
@@ -199,6 +200,9 @@ struct i40evf_adapter {
wait_queue_head_t down_waitqueue;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ struct list_head mac_filter_list;
+ /* Lock to protect accesses to MAC and VLAN lists */
+ spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
int num_active_queues;
int num_req_queues;
@@ -206,7 +210,6 @@ struct i40evf_adapter {
/* TX */
struct i40e_ring *tx_rings;
u32 tx_timeout_count;
- struct list_head mac_filter_list;
u32 tx_desc_count;
/* RX */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index da006fa3fec1..e2d8aa19d205 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -512,31 +512,31 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
struct ethtool_coalesce *ec,
int queue)
{
+ struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
+ struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
u16 vector;
- adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
- adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+ rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+ tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
- if (ec->use_adaptive_rx_coalesce)
- adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ if (!ec->use_adaptive_rx_coalesce)
+ rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC;
- if (ec->use_adaptive_tx_coalesce)
- adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ if (!ec->use_adaptive_tx_coalesce)
+ tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC;
- q_vector = adapter->rx_rings[queue].q_vector;
- q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
- q_vector = adapter->tx_rings[queue].q_vector;
- q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
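The rewrite above replaces the if/else pair with an unconditional set of I40E_ITR_DYNAMIC followed by an XOR to drop it when adaptive coalescing is off; since the bit is known to be set at that point, XOR is equivalent to clearing it. A self-contained demonstration (the flag's value here is a stand-in, not the real I40E_ITR_DYNAMIC):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ITR_DYNAMIC 0x8000u /* stand-in for I40E_ITR_DYNAMIC */

static uint32_t apply_itr(uint32_t setting, bool adaptive)
{
	setting |= ITR_DYNAMIC;         /* set unconditionally ... */
	if (!adaptive)
		setting ^= ITR_DYNAMIC; /* ... XOR clears a known-set bit */
	return setting;
}

int main(void)
{
	assert(apply_itr(100, true) == (100 | ITR_DYNAMIC));
	assert(apply_itr(100 | ITR_DYNAMIC, false) == 100);
	assert(apply_itr(100, false) == 100);
	return 0;
}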
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7b2a4eba92e2..16989ad2ca90 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -45,8 +45,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
- }
- }
-}
-
-/**
- * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
- * @adapter: board private structure
- * @mask: bitmap of vectors to trigger
- **/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
-{
- struct i40e_hw *hw = &adapter->hw;
- int i;
- u32 dyn_ctl;
-
- if (mask & 1) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
- }
- for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i)) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
}
@@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
struct net_device *netdev = data;
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
- u32 val;
/* handle non-queue interrupts, these reads clear the registers */
- val = rd32(hw, I40E_VFINT_ICR01);
- val = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
- val = rd32(hw, I40E_VFINT_DYN_CTL01) |
- I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, val);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
* @adapter: board private structure
* @vlan: vlan tag
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
@@ -731,14 +697,8 @@ static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f = NULL;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- goto out;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
@@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
}
clearout:
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-out:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -768,21 +727,16 @@ out:
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
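These conversions replace the __I40EVF_IN_CRITICAL_TASK busy-wait (a udelay loop with a retry count) with a dedicated spinlock for the MAC/VLAN filter lists. The _bh variant is used, presumably because some callers such as ndo_set_rx_mode run with bottom halves disabled. The resulting pattern, condensed from the reset-task hunk further down:

/* Any walk or mutation of the filter lists is now bracketed by the
 * list lock instead of the adapter-wide critical-section bit.
 */
static void mark_all_macs_for_add(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry(f, &adapter->mac_filter_list, list)
		f->add = true;
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}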
/**
@@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
* @adapter: board private structure
* @macaddr: the MAC address
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
@@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
{
struct i40evf_mac_filter *f;
- int count = 50;
if (!macaddr)
return NULL;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return NULL;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_filter(adapter, macaddr);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f) {
- clear_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
- return NULL;
- }
+ if (!f)
+ goto clearout;
ether_addr_copy(f->macaddr, macaddr);
@@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
f->remove = false;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+clearout:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
return -EPERM;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
- int count = 50;
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
@@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
i40evf_add_filter(adapter, mca->addr);
}
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0) {
- dev_err(&adapter->pdev->dev,
- "Failed to get lock in %s\n", __func__);
- return;
- }
- }
- /* remove filter if not in netdev list */
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
@@ -995,7 +937,7 @@ bottom_of_search_loop:
adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
/**
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
@@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
void i40evf_down(struct i40evf_adapter *adapter)
{
@@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
- usleep_range(500, 1000);
-
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
f->remove = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
@@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1441,6 +1387,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
+ q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
i40evf_napi_poll, NAPI_POLL_WEIGHT);
@@ -1770,13 +1717,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
- if (adapter->state == __I40EVF_RUNNING) {
- i40evf_irq_enable_queues(adapter, ~0);
- i40evf_fire_sw_int(adapter, 0xFF);
- } else {
- i40evf_fire_sw_int(adapter, 0x1);
- }
-
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_detect_recover_hung(&adapter->vsi);
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1796,7 +1738,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ if (adapter->state == __I40EVF_RUNNING) {
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
@@ -1808,6 +1754,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
i40evf_free_all_rx_resources(adapter);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* Delete all of the filters, both MAC and VLAN. */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
@@ -1819,6 +1767,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
kfree(fv);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1854,6 +1804,13 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
+ bool running;
+
+ /* When the device is being removed it doesn't make sense to run the
+ * reset task; just return in that case.
+ */
+ if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return;
while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
@@ -1913,7 +1870,13 @@ static void i40evf_reset_task(struct work_struct *work)
}
continue_reset:
- if (netif_running(netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ running = (adapter->state == __I40EVF_RUNNING);
+
+ if (running) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -1948,6 +1911,8 @@ continue_reset:
adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
@@ -1956,15 +1921,19 @@ continue_reset:
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
vlf->add = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
- if (netif_running(adapter->netdev)) {
+ /* We were running when the reset started, so we need to restore some
+ * state here.
+ */
+ if (running) {
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
if (err)
@@ -1993,9 +1962,13 @@ continue_reset:
adapter->state = __I40EVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return;
reset_err:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -2239,8 +2212,14 @@ static int i40evf_open(struct net_device *netdev)
return -EIO;
}
- if (adapter->state != __I40EVF_DOWN)
- return -EBUSY;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ if (adapter->state != __I40EVF_DOWN) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
@@ -2264,6 +2243,8 @@ static int i40evf_open(struct net_device *netdev)
i40evf_irq_enable(adapter, true);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
return 0;
err_req_irq:
@@ -2273,6 +2254,8 @@ err_setup_rx:
i40evf_free_all_rx_resources(adapter);
err_setup_tx:
i40evf_free_all_tx_resources(adapter);
+err_unlock:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
@@ -2296,6 +2279,9 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return 0;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -2305,6 +2291,8 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
* i40evf_virtchnl_completion() after we get confirmation from the PF
@@ -2357,13 +2345,19 @@ static int i40evf_set_features(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
+ /* Don't allow changing the VLAN_RX flag when a VLAN is set for the
+ * VF; return an error in that case.
+ */
+ if (VLAN_ALLOWED(adapter)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ adapter->aq_required |=
+ I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ else
+ adapter->aq_required |=
+ I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
return -EINVAL;
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
- else
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ }
return 0;
}
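
The rewritten i40evf_set_features() relies on the usual XOR trick: (netdev->features ^ features) has a bit set exactly for each flag the request would change, so masking with NETIF_F_HW_VLAN_CTAG_RX rejects only real toggles of that flag. A small self-contained demo of the trick follows; the bit position is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define HW_VLAN_CTAG_RX (1u << 6)	/* hypothetical bit, demo only */

/* nonzero only when the request would actually toggle the VLAN RX flag */
static uint32_t vlan_rx_changed(uint32_t cur, uint32_t req)
{
	return (cur ^ req) & HW_VLAN_CTAG_RX;
}

int main(void)
{
	uint32_t cur = HW_VLAN_CTAG_RX;

	printf("%u\n", vlan_rx_changed(cur, 0));		/* changed */
	printf("%u\n", vlan_rx_changed(cur, HW_VLAN_CTAG_RX));	/* unchanged */
	return 0;
}
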
@@ -2943,6 +2937,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
+ spin_lock_init(&adapter->mac_vlan_list_lock);
+
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2985,6 +2981,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
if (netif_running(netdev)) {
rtnl_lock();
i40evf_down(adapter);
@@ -2993,6 +2993,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
retval = pci_save_state(pdev);
if (retval)
return retval;
@@ -3066,7 +3068,8 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
int err;
-
+ /* Indicate we are in the remove path so the reset task does not run */
+ set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->client_task);
@@ -3101,8 +3104,6 @@ static void i40evf_remove(struct pci_dev *pdev)
if (adapter->watchdog_timer.function)
del_timer_sync(&adapter->watchdog_timer);
- flush_scheduled_work();
-
i40evf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -3118,6 +3119,7 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
kfree(adapter->vf_res);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
/* If we got removed before an up/down sequence, we've got a filter
* hanging out there that we need to get rid of.
*/
@@ -3130,6 +3132,8 @@ static void i40evf_remove(struct pci_dev *pdev)
kfree(f);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
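
A recurring theme in this file is the new mac_vlan_list_lock: every walk of mac_filter_list or vlan_filter_list, in the reset task, the remove path, and the virtchnl handlers below, now happens under the lock, and every early return must drop it. Here is a hedged userspace analog using a pthread mutex in place of spin_lock_bh(); the structure and names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mac_filter {
	struct mac_filter *next;
	bool add;
};

static pthread_mutex_t mac_vlan_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mac_filter *mac_filter_list;

/* mark every filter for re-add, as the reset-task hunk does above */
static void mark_all_for_readd(void)
{
	struct mac_filter *f;

	pthread_mutex_lock(&mac_vlan_list_lock);
	for (f = mac_filter_list; f; f = f->next)
		f->add = true;
	pthread_mutex_unlock(&mac_vlan_list_lock);
}

int main(void)
{
	struct mac_filter f = { .next = NULL, .add = false };

	mac_filter_list = &f;
	mark_all_for_readd();
	printf("f.add = %d\n", f.add);
	return 0;
}
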
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 46c8b8a3907c..50ce0d6c09ef 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
@@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
}
if (!flags) {
- adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+ I40EVF_FLAG_ALLMULTI_ON);
+ adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+ I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
@@ -965,23 +1001,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode == VIRTCHNL_OP_EVENT) {
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
+ bool link_up = vpe->event_data.link_event.link_status;
switch (vpe->event) {
case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
vpe->event_data.link_event.link_speed;
- if (adapter->link_up !=
- vpe->event_data.link_event.link_status) {
- adapter->link_up =
- vpe->event_data.link_event.link_status;
- if (adapter->link_up) {
- netif_tx_start_all_queues(netdev);
- netif_carrier_on(netdev);
- } else {
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
- }
- i40evf_print_link_message(adapter);
+
+ /* we've already got the right link status, bail */
+ if (adapter->link_up == link_up)
+ break;
+
+ /* If we get a link-up message and start queues before
+ * our queues are configured it will trigger a TX hang.
+ * In that case, just ignore the link status message;
+ * we'll get another one after we enable queues and are
+ * actually prepared to send traffic.
+ */
+ if (link_up && adapter->state != __I40EVF_RUNNING)
+ break;
+
+ adapter->link_up = link_up;
+ if (link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
}
+ i40evf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");
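
The reworked link-event handler applies two guards before touching the stack: bail if the status is unchanged, and ignore a link-up while the VF is not in __I40EVF_RUNNING, since starting queues before they are configured can trigger a TX hang. A minimal sketch of that state check; the enum and struct below are stand-ins, not driver types.

#include <stdbool.h>
#include <stdio.h>

enum vf_state { VF_DOWN, VF_RUNNING };	/* stand-ins for __I40EVF_* states */

struct vf {
	enum vf_state state;
	bool link_up;
};

static void handle_link_event(struct vf *vf, bool link_up)
{
	if (vf->link_up == link_up)		/* already correct, bail */
		return;
	if (link_up && vf->state != VF_RUNNING)	/* queues not ready yet */
		return;

	vf->link_up = link_up;
	printf("link %s\n", link_up ? "up" : "down");
}

int main(void)
{
	struct vf vf = { .state = VF_DOWN, .link_up = false };

	handle_link_event(&vf, true);	/* ignored: not running */
	vf.state = VF_RUNNING;
	handle_link_event(&vf, true);	/* accepted */
	return 0;
}
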
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 92845692087a..1c6b8d9176a8 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -690,6 +690,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d06a8db514d4..606e6761758f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3338,37 +3338,7 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
static unsigned int igb_max_channels(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- unsigned int max_combined = 0;
-
- switch (hw->mac.type) {
- case e1000_i211:
- max_combined = IGB_MAX_RX_QUEUES_I211;
- break;
- case e1000_82575:
- case e1000_i210:
- max_combined = IGB_MAX_RX_QUEUES_82575;
- break;
- case e1000_i350:
- if (!!adapter->vfs_allocated_count) {
- max_combined = 1;
- break;
- }
- /* fall through */
- case e1000_82576:
- if (!!adapter->vfs_allocated_count) {
- max_combined = 2;
- break;
- }
- /* fall through */
- case e1000_82580:
- case e1000_i354:
- default:
- max_combined = IGB_MAX_RX_QUEUES;
- break;
- }
-
- return max_combined;
+ return igb_get_max_rss_queues(adapter);
}
static void igb_get_channels(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c208753ff5b7..b88fae785369 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1744,6 +1744,20 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* value = idleSlope * 61034
* ----------------- (E6)
* 1000000
+ *
+ * NOTE: For i210, given the above, we can see that idleslope
+ * is represented in 16.38431 kbps units by the value at
+ * the TQAVCC register (1Gbps / 61034), which reduces
+ * the granularity for idleslope increments.
+ * For instance, if you want to configure a 2576kbps
+ * idleslope, the value to be written on the register
+ * would have to be 157.23. If rounded down, you end
+ * up with less bandwidth available than originally
+ * required (~2572 kbps). If rounded up, you end up
+ * with a higher bandwidth (~2589 kbps). Below the
+ * approach we take is to always round up the
+ * calculated value, so the resulting bandwidth might
+ * be slightly higher for some configurations.
*/
value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
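
The rounding note above is easy to verify numerically: for the 2576 kbps example, idleslope * 61034 / 1000000 is 157.22, DIV_ROUND_UP_ULL() yields 158, and converting back gives roughly 2589 kbps. A small standalone check, with DIV_ROUND_UP_ULL re-created for userspace:

#include <inttypes.h>
#include <stdio.h>

/* userspace re-creation of the kernel's DIV_ROUND_UP_ULL() */
#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t idleslope = 2576;	/* kbps, the example from the comment */
	uint64_t value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
	/* each register unit is 1 Gbps / 61034, i.e. ~16.38431 kbps */
	double kbps = value * 1000000.0 / 61034.0;

	/* prints: TQAVCC value = 158 (~2589 kbps) */
	printf("TQAVCC value = %" PRIu64 " (~%.0f kbps)\n", value, kbps);
	return 0;
}
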
@@ -3200,8 +3214,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
/* if allocation failed then we do not support SR-IOV */
if (!adapter->vf_data) {
adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev,
- "Unable to allocate memory for VF Data Storage\n");
err = -ENOMEM;
goto out;
}
@@ -3373,10 +3385,10 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
#endif /* CONFIG_PCI_IOV */
}
-static void igb_init_queue_configuration(struct igb_adapter *adapter)
+unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 max_rss_queues;
+ unsigned int max_rss_queues;
/* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
@@ -3407,6 +3419,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
break;
}
+ return max_rss_queues;
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igb_get_max_rss_queues(adapter);
adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
igb_set_flag_queue_pairs(adapter, max_rss_queues);
@@ -3676,7 +3696,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
int igb_close(struct net_device *netdev)
{
- if (netif_device_present(netdev))
+ if (netif_device_present(netdev) || netdev->dismantle)
return __igb_close(netdev, false);
return 0;
}
@@ -8718,7 +8738,8 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
/* Indicate to hardware the Address is Valid. */
if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
- rar_high |= E1000_RAH_AV;
+ if (is_valid_ether_addr(addr))
+ rar_high |= E1000_RAH_AV;
if (hw->mac.type == e1000_82575)
rar_high |= E1000_RAH_POOL_1 *
@@ -8756,17 +8777,36 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+
+ /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
+ * flag and allows to overwrite the MAC via VF netdev. This
+ * is necessary to allow libvirt a way to restore the original
+ * MAC after unbinding vfio-pci and reloading igbvf after shutting
+ * down a VM.
+ */
+ if (is_zero_ether_addr(mac)) {
+ adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev,
+ "remove administratively set MAC on VF %d\n",
+ vf);
+ } else if (is_valid_ether_addr(mac)) {
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
+ mac, vf);
+ dev_info(&adapter->pdev->dev,
+ "Reload the VF driver to make this change effective.\n");
+ /* Generate additional warning if PF is down */
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
return -EINVAL;
- adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev,
- "Reload the VF driver to make this change effective.");
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF MAC address has been set, but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before attempting to use the VF device.\n");
}
return igb_set_vf_mac(adapter, vf, mac);
}
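
igb_ndo_set_vf_mac() now distinguishes three cases: an all-zeroes address clears IGB_VF_FLAG_PF_SET_MAC so the VF can choose its own MAC again, a valid address sets the flag, and anything else is -EINVAL. A compressed sketch of that decision follows; the flag value and the validity check are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PF_SET_MAC 0x1	/* hypothetical flag bit */

static bool is_zero_mac(const unsigned char mac[6])
{
	static const unsigned char zero[6];

	return !memcmp(mac, zero, 6);
}

/* zero clears the "PF set" flag, a unicast address sets it, else reject */
static int set_vf_mac(unsigned int *flags, const unsigned char mac[6])
{
	if (is_zero_mac(mac)) {
		*flags &= ~PF_SET_MAC;	/* VF may pick its own MAC again */
		return 0;
	}
	if (!(mac[0] & 1)) {		/* simplified validity check: unicast */
		*flags |= PF_SET_MAC;
		return 0;
	}
	return -1;			/* the driver returns -EINVAL here */
}

int main(void)
{
	unsigned int flags = PF_SET_MAC;
	unsigned char zero[6] = {0};

	printf("%d, flags=%u\n", set_vf_mac(&flags, zero), flags);
	return 0;
}
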
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 841c2a083349..0746b19ec6d3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -643,6 +643,10 @@ static void igb_ptp_tx_work(struct work_struct *work)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
+ /* Clear the Tx valid bit in the TSYNCTXCTL register to enable
+ * the interrupt
+ */
+ rd32(E1000_TXSTMPH);
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
return;
}
@@ -717,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
*/
void igb_ptp_tx_hang(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
IGB_PTP_TX_TIMEOUT);
@@ -736,6 +741,10 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
+ /* Clear the Tx valid bit in the TSYNCTXCTL register to enable
+ * the interrupt
+ */
+ rd32(E1000_TXSTMPH);
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 35e6fa643c7e..8319465eb38d 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -42,3 +42,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
+ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 468c3555a629..c1e3a0039ea5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -52,7 +52,9 @@
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
+#include "ixgbe_ipsec.h"
+#include <net/xdp.h>
#include <net/busy_poll.h>
/* common prefix used by pr_<> macros */
@@ -170,10 +172,11 @@ enum ixgbe_tx_flags {
IXGBE_TX_FLAGS_CC = 0x08,
IXGBE_TX_FLAGS_IPV4 = 0x10,
IXGBE_TX_FLAGS_CSUM = 0x20,
+ IXGBE_TX_FLAGS_IPSEC = 0x40,
/* software defined flags */
- IXGBE_TX_FLAGS_SW_VLAN = 0x40,
- IXGBE_TX_FLAGS_FCOE = 0x80,
+ IXGBE_TX_FLAGS_SW_VLAN = 0x80,
+ IXGBE_TX_FLAGS_FCOE = 0x100,
};
/* VLAN info */
@@ -332,7 +335,6 @@ struct ixgbe_ring {
struct net_device *netdev; /* netdev ring belongs to */
struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
- struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -371,6 +373,7 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -395,8 +398,7 @@ enum ixgbe_ring_f_enum {
#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS 31
-#define IXGBE_MAX_DCBMACVLANS 8
+#define IXGBE_MAX_MACVLANS 63
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -629,15 +631,18 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
+#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
/* Tx fast path data */
int num_tx_queues;
u16 tx_itr_setting;
u16 tx_work_limit;
+ u64 tx_ipsec;
/* Rx fast path data */
int num_rx_queues;
u16 rx_itr_setting;
+ u64 rx_ipsec;
/* Port number used to identify VXLAN traffic */
__be16 vxlan_port;
@@ -674,6 +679,7 @@ struct ixgbe_adapter {
struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 hw_tcs;
u8 dcb_set_bitmap;
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
@@ -721,8 +727,7 @@ struct ixgbe_adapter {
u16 bridge_mode;
- u16 eeprom_verh;
- u16 eeprom_verl;
+ char eeprom_id[NVM_VER_SIZE];
u16 eeprom_cap;
u32 interrupt_event;
@@ -766,7 +771,8 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
- unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+ /* Bitmask indicating in use pools */
+ DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
#define IXGBE_MAX_LINK_HANDLE 10
struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
@@ -780,6 +786,10 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
+
+#ifdef CONFIG_XFRM
+ struct ixgbe_ipsec *ipsec;
+#endif /* CONFIG_XFRM */
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1010,4 +1020,24 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+#ifdef CONFIG_XFRM_OFFLOAD
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd);
+#else
+static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb) { };
+static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd) { return 0; };
+#endif /* CONFIG_XFRM_OFFLOAD */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 8a32eb7d47b9..a0ebd9ecf243 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -431,6 +431,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/**
* ixgbe_start_mac_link_82598 - Configures MAC link settings
* @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
@@ -1054,7 +1055,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index d602637ccc40..4dfc81dbee4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -221,7 +221,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
/**
* prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
* @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous proc_autoc_read_82599.
*
@@ -1310,10 +1310,11 @@ do { \
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- * @stream: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
*
* This function is almost identical to the function above but contains
- * several optomizations such as unwinding all of the loops, letting the
+ * several optimizations such as unwinding all of the loops, letting the
* compiler work out all of the conditional ifs since the keys are static
* defines, and computing two keys at once since the hashed dword stream
* will be the same for both keys.
@@ -1446,7 +1447,7 @@ do { \
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- * @atr_input: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
* @input_mask: mask for the input bitstream
*
* This function serves two main purposes. First it applies the input_mask
@@ -2078,6 +2079,7 @@ reset_pipeline_out:
* ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2131,6 +2133,7 @@ release_i2c_access:
* ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bef255f6a18..61188f343955 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1613,6 +1613,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/**
* ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
* @hw: pointer to hardware structure
+ * @count: number of bits to shift
**/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
@@ -1667,7 +1668,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
/**
* ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
* @hw: pointer to hardware structure
- * @eecd: EECD's current value
+ * @eec: EEC's current value
**/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
@@ -2037,7 +2038,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_set_mta - Set bit-vector in multicast table
* @hw: pointer to hardware structure
- * @hash_value: Multicast address hash value
+ * @mc_addr: Multicast address
*
* Sets the bit-vector in the multicast table.
**/
@@ -3086,6 +3087,8 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
+ * vlanid not found
*
* return the VLVF index where this VLAN id should be placed
*
@@ -3476,7 +3479,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for VLAN anti-spoofing
- * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
*
**/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
@@ -4028,6 +4031,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid option ROM version is found, nvm_ver->or_valid is set to
+ * true, else nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+ /* Option Rom may or may not be present. Start with pointer */
+ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* option rom exists and is valid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid OEM product version is found, nvm_ver->oem_valid is set to
+ * true, else nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+ /* Return if offset to OEM Product Version block is invalid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->eeprom.ops.read(hw, offset, &mod_len);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by bit 15 of
+ * the high order word.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
+
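
ixgbe_get_etk_id() packs the two EEPROM words into a single 32-bit Etrack ID, with bit 15 of the high word selecting which word goes where. A standalone illustration follows; the NVM_ETK_* values below are assumed for the demo, not taken from the driver header.

#include <stdint.h>
#include <stdio.h>

#define NVM_ETK_VALID	0x8000	/* bit 15; value assumed for this demo */
#define NVM_ETK_SHIFT	16	/* assumed: two 16-bit words packed in a u32 */

/* combine the two EEPROM words in the order selected by bit 15 */
static uint32_t pack_etk_id(uint16_t etk_id_l, uint16_t etk_id_h)
{
	if ((etk_id_h & NVM_ETK_VALID) == 0)
		return (uint32_t)etk_id_h | ((uint32_t)etk_id_l << NVM_ETK_SHIFT);
	return (uint32_t)etk_id_l | ((uint32_t)etk_id_h << NVM_ETK_SHIFT);
}

int main(void)
{
	printf("0x%08x\n", pack_etk_id(0x1234, 0x0abc));	/* 0x12340abc */
	printf("0x%08x\n", pack_etk_id(0x1234, 0x8abc));	/* 0x8abc1234 */
	return 0;
}
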
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index a01409e2e06c..4d4c02366cb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 072ef3b5fc61..aaea8282bfd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -39,6 +39,10 @@
* are the smallest unit programmable into the underlying
* hardware. The IEEE 802.1Qaz specification does not use bandwidth
* groups so this is much simplified from the CEE case.
+ * @bw: bandwidth index by traffic class
+ * @refill: refill credits index by traffic class
+ * @max: max credits by traffic class
+ * @max_frame: maximum frame size
*/
static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
@@ -72,8 +76,10 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
- * @ixgbe_dcb_config: Struct containing DCB settings.
- * @direction: Configuring either Tx or Rx.
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame: Maximum frame size
+ * @direction: Configuring either Tx or Rx
*
* This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index b79e93a5b699..f94c7e82a30b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -34,7 +34,9 @@
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
@@ -91,7 +93,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
@@ -137,7 +142,10 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
@@ -184,7 +192,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
@@ -269,7 +277,11 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1011d644978f..1eed6811e914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -38,6 +38,7 @@
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
@@ -148,6 +149,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
@@ -344,11 +346,12 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
* @refill: refill credits index by traffic class
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
- * @pfc_en: enabled pfc bitmask
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 78c52375acc6..b33f3f87e4b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -571,7 +571,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
return -EINVAL;
- if (max_tc != netdev_get_num_tc(dev)) {
+ if (max_tc != adapter->hw_tcs) {
err = ixgbe_setup_tc(dev, max_tc);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 5e2c1e35e517..ad54080488ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -249,7 +249,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
- * @pf: the pf that is stopping
+ * @adapter: the adapter that is exiting
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aad1c2a3667..221f15803480 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -115,6 +115,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
+ {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
+ {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -1014,16 +1016,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 nvm_track_id;
strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version));
- nvm_track_id = (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl;
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- nvm_track_id);
+ strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -1156,6 +1155,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
+ /* Clear copied XDP RX-queue info */
+ memset(&temp_ring[i].xdp_rxq, 0,
+ sizeof(temp_ring[i].xdp_rxq));
+
temp_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
if (err) {
@@ -3082,26 +3085,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
- /* fallthrough */
+ break;
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types =
- BIT(HWTSTAMP_TX_OFF) |
- BIT(HWTSTAMP_TX_ON);
-
info->rx_filters |=
BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
@@ -3110,13 +3096,31 @@ static int ixgbe_get_ts_info(struct net_device *dev,
default:
return ethtool_op_get_ts_info(dev, info);
}
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
return 0;
}
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
unsigned int max_combined;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/* We only support one q_vector without MSI-X */
@@ -3173,7 +3177,7 @@ static void ixgbe_get_channels(struct net_device *dev,
return;
/* same thing goes for being DCB enabled */
- if (netdev_get_num_tc(dev) > 1)
+ if (adapter->hw_tcs > 1)
return;
/* if ATR is disabled we can exit */
@@ -3219,7 +3223,7 @@ static int ixgbe_set_channels(struct net_device *dev,
#endif
/* use setup TC to update any traffic class queue mapping */
- return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
static int ixgbe_get_module_info(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a23c2b5411a0..7a09a40e4472 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -150,6 +150,7 @@ skip_ddpinv:
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
+ * @target_mode: 1 to set up target mode, 0 to set up initiator mode
*
* Returns : 1 for success and 0 for no ddp
*/
@@ -1034,11 +1035,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
ixgbe_driver_version);
/* Firmware Version */
- snprintf(info->firmware_version,
- sizeof(info->firmware_version),
- "0x%08x",
- (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl);
+ strlcpy(info->firmware_version, adapter->eeprom_id,
+ sizeof(info->firmware_version));
/* Model */
if (hw->mac.type == ixgbe_mac_82599EB) {
@@ -1066,7 +1064,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/**
* ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* Return : TC that FCoE is mapped to
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 000000000000..93eacddb6704
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,941 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "ixgbe.h"
+#include <net/xfrm.h>
+#include <crypto/aead.h>
+
+/**
+ * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @key: key byte array
+ * @salt: salt bytes
+ **/
+static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
+ u32 key[], u32 salt)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_FLUSH(hw);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
+ reg &= IXGBE_RXTXIDX_IPS_EN;
+ reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_item - set an Rx table item
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @tbl: table selector
+ *
+ * Trigger the device to store into a particular Rx table the
+ * data that has already been loaded into the input register
+ **/
+static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
+ enum ixgbe_ipsec_tbl_sel tbl)
+{
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
+ reg &= IXGBE_RXTXIDX_IPS_EN;
+ reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
+ idx << IXGBE_RXTXIDX_IDX_SHIFT |
+ IXGBE_RXTXIDX_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @spi: security parameter index
+ * @key: key byte array
+ * @salt: salt bytes
+ * @mode: rx decrypt control bits
+ * @ip_idx: index into IP table for related IP address
+ **/
+static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
+ u32 key[], u32 salt, u32 mode, u32 ip_idx)
+{
+ int i;
+
+ /* store the SPI (in big-endian) and IPidx */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);
+
+ /* store the key, salt, and mode */
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @addr: IP address byte array
+ **/
+static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
+{
+ int i;
+
+ /* store the ip address */
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
+}
+
+/**
+ * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 buf[4] = {0, 0, 0, 0};
+ u16 idx;
+
+ /* disable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+
+ /* scrub the tables - split the loops for the max of the IP table */
+ for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
+ ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+ ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+ ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
+ }
+ for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
+ ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+ ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+ }
+
+ ipsec->num_rx_sa = 0;
+ ipsec->num_tx_sa = 0;
+}
+
+/**
+ * ixgbe_ipsec_stop_data
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link = adapter->link_up;
+ u32 t_rdy, r_rdy;
+ u32 limit;
+ u32 reg;
+
+ /* halt data paths */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ reg |= IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ reg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* If the Tx FIFO doesn't have link but still has data, we can't
+ * clear the Tx sec block, so set the MAC loopback before clearing
+ * the block.
+ */
+ if (!link) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg |= IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+ mdelay(3);
+ }
+
+ /* wait for the paths to empty */
+ limit = 20;
+ do {
+ mdelay(10);
+ t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+ IXGBE_SECTXSTAT_SECTX_RDY;
+ r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+ IXGBE_SECRXSTAT_SECRX_RDY;
+ } while (!t_rdy && !r_rdy && limit--);
+
+ /* undo loopback if we played with it earlier */
+ if (!link) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg &= ~IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
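
ixgbe_ipsec_stop_data() uses a classic bounded-poll loop: read the ready bits, sleep, and give up after a fixed number of iterations rather than hanging if the FIFOs never drain. A userspace sketch of the shape of that loop; the fake paths_ready() stands in for the SECTXSTAT/SECRXSTAT reads.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* fake status read standing in for the SECTXSTAT/SECRXSTAT ready bits */
static bool paths_ready(int iteration)
{
	return iteration >= 3;	/* pretend the FIFOs drain after ~30 ms */
}

int main(void)
{
	int limit = 20;		/* same bound as the driver, ~200 ms worst case */
	int i = 0;
	bool ready;

	do {
		usleep(10 * 1000);	/* the driver uses mdelay(10) */
		ready = paths_ready(i++);
	} while (!ready && limit--);

	printf("%s after %d polls\n", ready ? "drained" : "timed out", i);
	return 0;
}
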
+
+/**
+ * ixgbe_ipsec_stop_engine
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ixgbe_ipsec_stop_data(adapter);
+
+ /* disable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+
+ /* disable the Rx and Tx engines and full packet store-n-forward */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ reg |= IXGBE_SECTXCTRL_SECTX_DIS;
+ reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ reg |= IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+ /* restore the "tx security buffer almost full threshold" to 0x250 */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);
+
+ /* Set minimum IFG between packets back to the default 0x1 */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xfffffff0) | 0x1;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ /* final set for normal (no ipsec offload) processing */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_start_engine
+ * @adapter: board private structure
+ *
+ * NOTE: this increases power consumption whether being used or not
+ **/
+static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ixgbe_ipsec_stop_data(adapter);
+
+ /* Set minimum IFG between packets to 3 */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xfffffff0) | 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ /* Set "tx security buffer almost full threshold" to 0x15 so that the
+ * almost full indication is generated only after the buffer contains at
+ * least an entire jumbo packet.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+ reg = (reg & 0xfffffc00) | 0x15;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);
+
+ /* restart the data paths by clearing the DISABLE bits */
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);
+
+ /* enable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
+ * @adapter: board private structure
+ **/
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
+ return;
+
+ /* clean up and restart the engine */
+ ixgbe_ipsec_stop_engine(adapter);
+ ixgbe_ipsec_clear_hw_tables(adapter);
+ ixgbe_ipsec_start_engine(adapter);
+
+ /* reload the IP addrs */
+ for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
+ struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
+
+ if (ipsa->used)
+ ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
+ }
+
+ /* reload the Rx and Tx keys */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ struct rx_sa *rsa = &ipsec->rx_tbl[i];
+ struct tx_sa *tsa = &ipsec->tx_tbl[i];
+
+ if (rsa->used)
+ ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
+ rsa->key, rsa->salt,
+ rsa->mode, rsa->iptbl_ind);
+
+ if (tsa->used)
+ ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
+ }
+}
+
+/**
+ * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
+ * @ipsec: pointer to ipsec struct
+ * @rxtable: true if we need to look in the Rx table
+ *
+ * Returns the first unused index in either the Rx or Tx SA table
+ **/
+static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
+{
+ u32 i;
+
+ if (rxtable) {
+ if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search rx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->rx_tbl[i].used)
+ return i;
+ }
+ } else {
+ if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search tx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->tx_tbl[i].used)
+ return i;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * ixgbe_ipsec_find_rx_state - find the state that matches
+ * @ipsec: pointer to ipsec struct
+ * @daddr: inbound address to match
+ * @proto: protocol to match
+ * @spi: SPI to match
+ * @ip4: true if using an ipv4 address
+ *
+ * Returns a pointer to the matching SA state information
+ **/
+static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
+ __be32 *daddr, u8 proto,
+ __be32 spi, bool ip4)
+{
+ struct rx_sa *rsa;
+ struct xfrm_state *ret = NULL;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
+ if (spi == rsa->xs->id.spi &&
+ ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
+ (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
+ sizeof(rsa->xs->id.daddr.a6)))) &&
+ proto == rsa->xs->id.proto) {
+ ret = rsa->xs;
+ xfrm_state_hold(ret);
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
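
The Rx lookup above hashes inbound SAs by SPI and walks only the matching bucket, holding the RCU read lock and taking a reference on the xfrm_state before returning it. A plain (non-RCU) userspace sketch of the bucket walk; sizes and fields are illustrative.

#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 16	/* illustrative table size */

struct rx_sa {
	struct rx_sa *next;	/* hash-bucket chain */
	uint32_t spi;
	uint8_t proto;
};

static struct rx_sa *buckets[NBUCKETS];

/* walk only the bucket selected by the SPI, as the hashed lookup does */
static struct rx_sa *find_rx_sa(uint32_t spi, uint8_t proto)
{
	struct rx_sa *rsa;

	for (rsa = buckets[spi % NBUCKETS]; rsa; rsa = rsa->next)
		if (rsa->spi == spi && rsa->proto == proto)
			return rsa;
	return NULL;
}

int main(void)
{
	struct rx_sa sa = { .next = NULL, .spi = 0x100, .proto = 50 };

	buckets[sa.spi % NBUCKETS] = &sa;
	printf("%s\n", find_rx_sa(0x100, 50) ? "found" : "missing");
	return 0;
}
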
+
+/**
+ * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @xs: pointer to xfrm_state struct
+ * @mykey: pointer to key array to populate
+ * @mysalt: pointer to salt value to populate
+ *
+ * This copies the protocol keys and salt to our own data tables. The
+ * 82599 family only supports the one algorithm.
+ **/
+static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+ u32 *mykey, u32 *mysalt)
+{
+ struct net_device *dev = xs->xso.dev;
+ unsigned char *key_data;
+ char *alg_name = NULL;
+ const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+ int key_len;
+
+ if (xs->aead) {
+ key_data = &xs->aead->alg_key[0];
+ key_len = xs->aead->alg_key_len;
+ alg_name = xs->aead->alg_name;
+ } else {
+ netdev_err(dev, "Unsupported IPsec algorithm\n");
+ return -EINVAL;
+ }
+
+ if (strcmp(alg_name, aes_gcm_name)) {
+ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+ aes_gcm_name);
+ return -EINVAL;
+ }
+
+ /* The key bytes come down in a big-endian array of bytes, so
+ * we don't need to do any byteswapping.
+ * 160 accounts for the 16-byte key and 4-byte salt
+ */
+ if (key_len == 160) {
+ *mysalt = ((u32 *)key_data)[4];
+ } else if (key_len != 128) {
+ netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
+ return -EINVAL;
+ } else {
+ netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
+ *mysalt = 0;
+ }
+ memcpy(mykey, key_data, 16);
+
+ return 0;
+}
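
The key parsing treats the xfrm algorithm blob as key-then-salt: a 160-bit alg_key_len means a 16-byte AES key followed by a 4-byte salt, while 128 bits means no salt was supplied. A self-contained sketch of that split; split_key_blob() is a hypothetical helper, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical helper: key_len is in bits, 160 = 16-byte key + 4-byte salt */
static int split_key_blob(const uint8_t *blob, int key_len_bits,
			  uint8_t key[16], uint32_t *salt)
{
	if (key_len_bits == 160)
		memcpy(salt, blob + 16, 4);	/* salt follows the key bytes */
	else if (key_len_bits == 128)
		*salt = 0;			/* no salt supplied */
	else
		return -1;			/* unsupported key size */
	memcpy(key, blob, 16);
	return 0;
}

int main(void)
{
	uint8_t blob[20] = { [16] = 0xde, 0xad, 0xbe, 0xef };
	uint8_t key[16];
	uint32_t salt;

	if (!split_key_blob(blob, 160, key, &salt))
		printf("salt = 0x%08x\n", salt);
	return 0;
}
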
+
+/**
+ * ixgbe_ipsec_add_sa - program device with a security association
+ * @xs: pointer to transformer state struct
+ **/
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int checked, match, first;
+ u16 sa_idx;
+ int ret;
+ int i;
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+ xs->id.proto);
+ return -EINVAL;
+ }
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ struct rx_sa rsa;
+
+ if (xs->calg) {
+ netdev_err(dev, "Compression offload not supported\n");
+ return -EINVAL;
+ }
+
+ /* find the first unused index */
+ ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Rx table!\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&rsa, 0, sizeof(rsa));
+ rsa.used = true;
+ rsa.xs = xs;
+
+ if (rsa.xs->id.proto & IPPROTO_ESP)
+ rsa.decrypt = xs->ealg || xs->aead;
+
+ /* get the key and salt */
+ ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ return ret;
+ }
+
+ /* get ip for rx sa table */
+ if (xs->props.family == AF_INET6)
+ memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
+ else
+ memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
+
+ /* The HW does not have a 1:1 mapping from keys to IP addrs, so
+ * check for a matching IP addr entry in the table. If the addr
+ * already exists, use it; else find an unused slot and add the
+ * addr. If one does not exist and there are no unused table
+ * entries, fail the request.
+ */
+
+ /* Find an existing match or first not used, and stop looking
+ * after we've checked all we know we have.
+ */
+ checked = 0;
+ match = -1;
+ first = -1;
+ for (i = 0;
+ i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
+ (checked < ipsec->num_rx_sa || first < 0);
+ i++) {
+ if (ipsec->ip_tbl[i].used) {
+ if (!memcmp(ipsec->ip_tbl[i].ipaddr,
+ rsa.ipaddr, sizeof(rsa.ipaddr))) {
+ match = i;
+ break;
+ }
+ checked++;
+ } else if (first < 0) {
+ first = i; /* track the first empty seen */
+ }
+ }
+
+ if (ipsec->num_rx_sa == 0)
+ first = 0;
+
+ if (match >= 0) {
+ /* addrs are the same, we should use this one */
+ rsa.iptbl_ind = match;
+ ipsec->ip_tbl[match].ref_cnt++;
+
+ } else if (first >= 0) {
+ /* no matches, but here's an empty slot */
+ rsa.iptbl_ind = first;
+
+ memcpy(ipsec->ip_tbl[first].ipaddr,
+ rsa.ipaddr, sizeof(rsa.ipaddr));
+ ipsec->ip_tbl[first].ref_cnt = 1;
+ ipsec->ip_tbl[first].used = true;
+
+ ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);
+
+ } else {
+ /* no match and no empty slot */
+ netdev_err(dev, "No space for SA in Rx IP SA table\n");
+ memset(&rsa, 0, sizeof(rsa));
+ return -ENOSPC;
+ }
+
+ rsa.mode = IXGBE_RXMOD_VALID;
+ if (rsa.xs->id.proto & IPPROTO_ESP)
+ rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
+ if (rsa.decrypt)
+ rsa.mode |= IXGBE_RXMOD_DECRYPT;
+ if (rsa.xs->props.family == AF_INET6)
+ rsa.mode |= IXGBE_RXMOD_IPV6;
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
+
+ ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
+ rsa.salt, rsa.mode, rsa.iptbl_ind);
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
+
+ ipsec->num_rx_sa++;
+
+ /* hash the new entry for faster search in Rx path */
+ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
+ rsa.xs->id.spi);
+ } else {
+ struct tx_sa tsa;
+
+ /* find the first unused index */
+ ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Tx table\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&tsa, 0, sizeof(tsa));
+ tsa.used = true;
+ tsa.xs = xs;
+
+ if (xs->id.proto & IPPROTO_ESP)
+ tsa.encrypt = xs->ealg || xs->aead;
+
+ ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ memset(&tsa, 0, sizeof(tsa));
+ return ret;
+ }
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
+
+ ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);
+
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
+
+ ipsec->num_tx_sa++;
+ }
+
+ /* enable the engine if not already warmed up */
+ if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
+ ixgbe_ipsec_start_engine(adapter);
+ adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
+ }
+
+ return 0;
+}
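+
+/* Example (editor's sketch; addresses, SPI and device name are
+ * hypothetical): a state this callback can offload might be created
+ * from userspace with something like
+ *
+ *   ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
+ *       proto esp spi 0x101 reqid 0x101 mode transport \
+ *       aead 'rfc4106(gcm(aes))' 0x<20-byte key+salt in hex> 128 \
+ *       offload dev eth0 dir in
+ *
+ * where the 20-byte keymat is the 16-byte AES key plus the 4-byte salt
+ * (alg_key_len == 160 bits) that ixgbe_ipsec_parse_proto_keys() expects.
+ */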
+
+/**
+ * ixgbe_ipsec_del_sa - clear out this specific SA
+ * @xs: pointer to transformer state struct
+ **/
+static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 zerobuf[4] = {0, 0, 0, 0};
+ u16 sa_idx;
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ struct rx_sa *rsa;
+ u8 ipi;
+
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
+ rsa = &ipsec->rx_tbl[sa_idx];
+
+ if (!rsa->used) {
+ netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
+ hash_del_rcu(&rsa->hlist);
+
+ /* if the IP table entry is referenced by only this SA,
+ * i.e. ref_cnt is only 1, clear the IP table entry as well
+ */
+ ipi = rsa->iptbl_ind;
+ if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
+ ipsec->ip_tbl[ipi].ref_cnt--;
+
+ if (!ipsec->ip_tbl[ipi].ref_cnt) {
+ memset(&ipsec->ip_tbl[ipi], 0,
+ sizeof(struct rx_ip_sa));
+ ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
+ }
+ }
+
+ memset(rsa, 0, sizeof(struct rx_sa));
+ ipsec->num_rx_sa--;
+ } else {
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+
+ if (!ipsec->tx_tbl[sa_idx].used) {
+ netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
+ memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
+ ipsec->num_tx_sa--;
+ }
+
+ /* if there are no SAs left, stop the engine to save energy */
+ if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
+ adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
+ ixgbe_ipsec_stop_engine(adapter);
+ }
+}
+
+/**
+ * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+ if (xs->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl != 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+
+ return true;
+}
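+
+/* Editor's note: for example, an IPv4 header carrying options has
+ * ihl > 5, so such packets (like IPv6 packets with extension headers)
+ * fall back to the software xfrm path instead of the hw offload.
+ */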
+
+/**
+ * ixgbe_ipsec_free - called by the xfrm garbage collector
+ * @xs: pointer to transformer state struct
+ *
+ * We don't have any garbage to collect, so we wouldn't need to
+ * implement this function, but the XFRM code doesn't check that the
+ * callback exists before calling it.
+ **/
+static void ixgbe_ipsec_free(struct xfrm_state *xs)
+{
+}
+
+static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
+ .xdo_dev_state_add = ixgbe_ipsec_add_sa,
+ .xdo_dev_state_delete = ixgbe_ipsec_del_sa,
+ .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
+ .xdo_dev_state_free = ixgbe_ipsec_free,
+};
+
+/**
+ * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
+ * @tx_ring: Tx ring the packet is to be transmitted on
+ * @first: first tx_buffer of the packet
+ * @itd: ipsec Tx data for later use in building the context descriptor
+ **/
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_state *xs;
+ struct tx_sa *tsa;
+
+ if (unlikely(!first->skb->sp->len)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
+ __func__, first->skb->sp->len);
+ return 0;
+ }
+
+ xs = xfrm_input_state(first->skb);
+ if (unlikely(!xs)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
+ __func__, xs);
+ return 0;
+ }
+
+ itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+ if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+ netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
+ __func__, itd->sa_idx, xs->xso.offload_handle);
+ return 0;
+ }
+
+ tsa = &ipsec->tx_tbl[itd->sa_idx];
+ if (unlikely(!tsa->used)) {
+ netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
+ __func__, itd->sa_idx);
+ return 0;
+ }
+
+ first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;
+
+ itd->flags = 0;
+ if (xs->id.proto == IPPROTO_ESP) {
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ if (first->protocol == htons(ETH_P_IP))
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
+ itd->trailer_len = xs->props.trailer_len;
+ }
+ if (tsa->encrypt)
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
+
+ return 1;
+}
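+
+/* Editor's note: the fields filled in above are consumed when the Tx
+ * context descriptor is built -- itd->sa_idx lands in fceof_saidx and
+ * itd->flags | itd->trailer_len is OR'd into type_tucmd; see the
+ * ixgbe_tx_csum() changes in ixgbe_main.c later in this patch.
+ */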
+
+/**
+ * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
+ * @rx_ring: receiving ring
+ * @rx_desc: receive data descriptor
+ * @skb: current data packet
+ *
+ * Determine if an ipsec encapsulation was noticed and, if so, set up
+ * the resulting status for use later in the receive stack.
+ **/
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
+ IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_offload *xo = NULL;
+ struct xfrm_state *xs = NULL;
+ struct ipv6hdr *ip6 = NULL;
+ struct iphdr *ip4 = NULL;
+ void *daddr;
+ __be32 spi;
+ u8 *c_hdr;
+ u8 proto;
+
+ /* Find the ip and crypto headers in the data.
+ * We can assume there is no vlan header in the way because
+ * the hardware won't recognize the IPsec packet with one, and
+ * in any case vlan devices don't currently support xfrm offload.
+ */
+ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
+ ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ daddr = &ip4->daddr;
+ c_hdr = (u8 *)ip4 + ip4->ihl * 4;
+ } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
+ ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ daddr = &ip6->daddr;
+ c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
+ } else {
+ return;
+ }
+
+ switch (pkt_info & ipsec_pkt_types) {
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
+ spi = ((struct ip_auth_hdr *)c_hdr)->spi;
+ proto = IPPROTO_AH;
+ break;
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
+ spi = ((struct ip_esp_hdr *)c_hdr)->spi;
+ proto = IPPROTO_ESP;
+ break;
+ default:
+ return;
+ }
+
+ xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
+ if (unlikely(!xs))
+ return;
+
+ skb->sp = secpath_dup(skb->sp);
+ if (unlikely(!skb->sp))
+ return;
+
+ skb->sp->xvec[skb->sp->len++] = xs;
+ skb->sp->olen++;
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = CRYPTO_SUCCESS;
+
+ adapter->rx_ipsec++;
+}
+
+/**
+ * ixgbe_init_ipsec_offload - initialize security registers for IPsec operation
+ * @adapter: board private structure
+ **/
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec;
+ size_t size;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return;
+
+ ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
+ if (!ipsec)
+ goto err1;
+ hash_init(ipsec->rx_sa_list);
+
+ size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->rx_tbl)
+ goto err2;
+
+ size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->tx_tbl)
+ goto err2;
+
+ size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
+ ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->ip_tbl)
+ goto err2;
+
+ ipsec->num_rx_sa = 0;
+ ipsec->num_tx_sa = 0;
+
+ adapter->ipsec = ipsec;
+ ixgbe_ipsec_stop_engine(adapter);
+ ixgbe_ipsec_clear_hw_tables(adapter);
+
+ adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
+ adapter->netdev->features |= NETIF_F_HW_ESP;
+ adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ return;
+
+err2:
+ kfree(ipsec->ip_tbl);
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+err1:
+ kfree(adapter->ipsec);
+ netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
+}
+
+/**
+ * ixgbe_stop_ipsec_offload - tear down the ipsec offload
+ * @adapter: board private structure
+ **/
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+
+ adapter->ipsec = NULL;
+ if (ipsec) {
+ kfree(ipsec->ip_tbl);
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+ kfree(ipsec);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 000000000000..da3ce7849e85
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,93 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program. If not, see <http://www.gnu.org/licenses/>.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_IPSEC_H_
+#define _IXGBE_IPSEC_H_
+
+#define IXGBE_IPSEC_MAX_SA_COUNT 1024
+#define IXGBE_IPSEC_MAX_RX_IP_COUNT 128
+#define IXGBE_IPSEC_BASE_RX_INDEX 0
+#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT
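+
+/* Editor's note: xs->xso.offload_handle is sa_idx plus one of the
+ * BASE_*_INDEX offsets above, so handles 0..1023 name Rx SA slots and
+ * 1024..2047 name Tx SA slots.
+ */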
+
+#define IXGBE_RXTXIDX_IPS_EN 0x00000001
+#define IXGBE_RXIDX_TBL_SHIFT 1
+enum ixgbe_ipsec_tbl_sel {
+ ips_rx_ip_tbl = 0x01,
+ ips_rx_spi_tbl = 0x02,
+ ips_rx_key_tbl = 0x03,
+};
+
+#define IXGBE_RXTXIDX_IDX_SHIFT 3
+#define IXGBE_RXTXIDX_READ 0x40000000
+#define IXGBE_RXTXIDX_WRITE 0x80000000
+
+#define IXGBE_RXMOD_VALID 0x00000001
+#define IXGBE_RXMOD_PROTO_ESP 0x00000004
+#define IXGBE_RXMOD_DECRYPT 0x00000008
+#define IXGBE_RXMOD_IPV6 0x00000010
+
+struct rx_sa {
+ struct hlist_node hlist;
+ struct xfrm_state *xs;
+ __be32 ipaddr[4];
+ u32 key[4];
+ u32 salt;
+ u32 mode;
+ u8 iptbl_ind;
+ bool used;
+ bool decrypt;
+};
+
+struct rx_ip_sa {
+ __be32 ipaddr[4];
+ u32 ref_cnt;
+ bool used;
+};
+
+struct tx_sa {
+ struct xfrm_state *xs;
+ u32 key[4];
+ u32 salt;
+ bool encrypt;
+ bool used;
+};
+
+struct ixgbe_ipsec_tx_data {
+ u32 flags;
+ u16 trailer_len;
+ u16 sa_idx;
+};
+
+struct ixgbe_ipsec {
+ u16 num_rx_sa;
+ u16 num_tx_sa;
+ struct rx_ip_sa *ip_tbl;
+ struct rx_sa *rx_tbl;
+ struct tx_sa *tx_tbl;
+ DECLARE_HASHTABLE(rx_sa_list, 10);
+};
+#endif /* _IXGBE_IPSEC_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8e2a957aca18..4242f0213e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -46,8 +46,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
- u16 reg_idx;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u16 reg_idx, pool;
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -58,12 +58,16 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
- for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= tcs)
+ if ((reg_idx & ~vmdq->mask) >= tcs) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -92,6 +96,7 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
reg_idx++;
}
@@ -111,9 +116,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
- struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
*tx = 0;
*rx = 0;
@@ -168,10 +172,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
**/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
- struct net_device *dev = adapter->netdev;
+ u8 num_tcs = adapter->hw_tcs;
unsigned int tx_idx, rx_idx;
int tc, offset, rss_i, i;
- u8 num_tcs = netdev_get_num_tc(dev);
/* verify we have DCB queueing enabled before proceeding */
if (num_tcs <= 1)
@@ -184,6 +187,7 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
adapter->tx_ring[offset + i]->reg_idx = tx_idx;
adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->rx_ring[offset + i]->netdev = adapter->netdev;
adapter->tx_ring[offset + i]->dcb_tc = tc;
adapter->rx_ring[offset + i]->dcb_tc = tc;
}
@@ -208,14 +212,15 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+ u16 reg_idx, pool;
int i;
- u16 reg_idx;
/* only proceed if VMDq is enabled */
if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
+ pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
@@ -224,15 +229,20 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
break;
#endif
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ if ((reg_idx & ~vmdq->mask) >= rss->indices) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
#ifdef IXGBE_FCOE
/* FCoE uses a linear block of queues so just assigning 1:1 */
- for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ for (; i < adapter->num_rx_queues; i++, reg_idx++) {
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
#endif
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -269,8 +279,10 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i, reg_idx;
- for (i = 0; i < adapter->num_rx_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i]->reg_idx = i;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
adapter->tx_ring[i]->reg_idx = reg_idx;
for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
@@ -340,7 +352,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -350,6 +362,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit VMDq instances on the PF by number of Tx queues */
+ vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -437,7 +452,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
int tcs;
/* Map queue offset and counts onto allocated tx queues */
- tcs = netdev_get_num_tc(dev);
+ tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -512,12 +527,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit l2fwd RSS based on total Tx queue limit */
+ rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -525,7 +542,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+ if (vmdq_i > 32) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -602,6 +619,10 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
}
#endif
+ /* populate TC0 for use by pool 0 */
+ netdev_set_tc_queue(adapter->netdev, 0,
+ adapter->num_rx_queues_per_pool, 0);
+
return true;
}
@@ -701,7 +722,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_xdp_queues = 0;
- adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_pools = 1;
adapter->num_rx_queues_per_pool = 1;
#ifdef CONFIG_IXGBE_DCB
@@ -834,7 +855,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
ring_count = txr_count + rxr_count + xdp_count;
size = sizeof(struct ixgbe_q_vector) +
@@ -917,11 +938,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- txr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = txr_idx;
+ ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -991,11 +1008,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- rxr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = rxr_idx;
+ ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
@@ -1171,7 +1184,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
*/
/* Disable DCB unless we only have a single traffic class */
- if (netdev_get_num_tc(adapter->netdev) > 1) {
+ if (adapter->hw_tcs > 1) {
e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
netdev_reset_tc(adapter->netdev);
@@ -1183,6 +1196,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.pfc_mode_enable = false;
}
+ adapter->hw_tcs = 0;
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
@@ -1268,7 +1282,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
- u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+ u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
@@ -1282,7 +1296,7 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62a18914f00f..0da5aa2c8aba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static const struct net_device_ops ixgbe_netdev_ops;
+
+static bool netif_is_ixgbe(struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
+}
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
- u32 head, tail;
+ unsigned int head, tail;
- if (ring->l2_accel_priv)
- adapter = ring->l2_accel_priv->real_adapter;
- else
- adapter = netdev_priv(ring->netdev);
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
- hw = &adapter->hw;
- head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
-
- if (head != tail)
- return (head < tail) ?
- tail - head : (tail + ring->count - head);
-
- return 0;
+ return ((head <= tail) ? tail : tail + ring->count) - head;
}
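+/* Editor's sketch: in ixgbe_get_tx_pending() above, with ring->count
+ * = 512, head = 500 and tail = 10 the ring has wrapped, so pending =
+ * (10 + 512) - 500 = 22 descriptors; without a wrap (head <= tail)
+ * pending is simply tail - head.
+ */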
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
@@ -1133,6 +1128,9 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate
**/
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
@@ -1173,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
@@ -1204,6 +1202,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
/* update the statistics for this packet */
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
+ total_ipsec++;
/* free the skb */
if (ring_is_xdp(tx_ring))
@@ -1266,6 +1266,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
@@ -1754,9 +1755,18 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- skb_record_rx_queue(skb, rx_ring->queue_index);
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
+ ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
skb->protocol = eth_type_trans(skb, dev);
+
+ /* record Rx queue, or update MACVLAN statistics */
+ if (netif_is_ixgbe(dev))
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1921,10 +1931,13 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
if (IS_ERR(skb))
return true;
- /* verify that the packet does not have any known errors */
- if (unlikely(ixgbe_test_staterr(rx_desc,
- IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
- !(netdev->features & NETIF_F_RXALL))) {
+ /* Verify netdev is present, and that packet does not have any
+ * errors that would be unacceptable to the netdev.
+ */
+ if (!netdev ||
+ (unlikely(ixgbe_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL)))) {
dev_kfree_skb_any(skb);
return true;
}
@@ -2021,8 +2034,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of data in rx_buffer
*
* This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
@@ -2318,12 +2331,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
bool xdp_xmit = false;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
- struct xdp_buff xdp;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2515,13 +2530,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
/**
* ixgbe_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information
@@ -2534,8 +2542,6 @@ enum latency_range {
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see ixgbe_param.c)
**/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
@@ -3020,6 +3026,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
+ * @queues: enable irqs for queues
+ * @flush: flush register write
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
@@ -3475,6 +3483,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
*
**/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
@@ -3586,7 +3595,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rttdcs, mtqc;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
@@ -3853,16 +3862,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
- unsigned int pf_pool = adapter->num_vfs;
/* Write redirection table to HW */
for (i = 0; i < reta_entries; i++) {
+ u16 pool = adapter->num_rx_pools;
+
vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
- if ((i & 3) == 3) {
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ if ((i & 3) != 3)
+ continue;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
vfreta);
- vfreta = 0;
- }
+ vfreta = 0;
}
}
@@ -3899,13 +3912,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- unsigned int pf_pool = adapter->num_vfs;
int i, j;
/* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
- *(adapter->rss_key + i));
+ for (i = 0; i < 10; i++) {
+ u16 pool = adapter->num_rx_pools;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
+ *(adapter->rss_key + i));
+ }
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3933,7 +3950,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_RSS].mask)
mrqc = IXGBE_MRQC_RSSEN;
} else {
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
if (tcs > 4)
@@ -3971,7 +3988,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
- unsigned int pf_pool = adapter->num_vfs;
+ u16 pool = adapter->num_rx_pools;
/* Enable VF RSS mode */
mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
@@ -3981,7 +3998,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
ixgbe_setup_vfreta(adapter);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFMRQC(VMDQ_P(pool)),
+ vfmrqc);
} else {
ixgbe_setup_reta(adapter);
mrqc |= rss_field;
@@ -3991,8 +4012,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
- * @adapter: address of board private structure
- * @index: index of ring to set
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
**/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
@@ -4112,11 +4133,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
rxdctl &= ~0x3FFFFF;
rxdctl |= 0x080420;
#if (PAGE_SIZE < 8192)
- } else {
+ /* RXDCTL.RLPML does not work on 82599 */
+ } else if (hw->mac.type != ixgbe_mac_82599EB) {
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
- /* Limit the maximum frame size so we don't overrun the skb */
+ /* Limit the maximum frame size so we don't overrun the skb.
+ * This can happen in SRIOV mode when the MTU of the VF is
+ * higher than the MTU of the PF.
+ */
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
@@ -4144,7 +4169,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- u16 pool;
+ u16 pool = adapter->num_rx_pools;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4161,7 +4186,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1u << 29;
- for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4488,8 +4513,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
@@ -4525,8 +4551,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
@@ -4846,9 +4873,11 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
+
/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
+ * @vfn: pool to associate with unicast addresses
*
* Writes unicast address list to the RAR table.
* Returns: -ENOMEM on failure/insufficient address space
@@ -5195,7 +5224,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int num_tc = netdev_get_num_tc(adapter->netdev);
+ int num_tc = adapter->hw_tcs;
int i;
if (!num_tc)
@@ -5218,7 +5247,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int hdrm;
- u8 tc = netdev_get_num_tc(adapter->netdev);
+ u8 tc = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -5277,29 +5306,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int rss_i = adapter->num_rx_queues_per_pool;
- struct ixgbe_hw *hw = &adapter->hw;
- u16 pool = vadapter->pool;
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_L2HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- if (rss_i > 3)
- psrtype |= 2u << 29;
- else if (rss_i > 1)
- psrtype |= 1u << 29;
-
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5352,96 +5358,45 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
- struct ixgbe_ring *rx_ring)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int index = rx_ring->queue_index + vadapter->rx_base_queue;
-
- /* shutdown specific queue receive and wait for dma to settle */
- ixgbe_disable_rx_queue(adapter, rx_ring);
- usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
- ixgbe_clean_rx_ring(rx_ring);
- rx_ring->l2_accel_priv = NULL;
-}
-
-static int ixgbe_fwd_ring_down(struct net_device *vdev,
- struct ixgbe_fwd_adapter *accel)
-{
- struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase = accel->rx_base_queue;
- unsigned int txbase = accel->tx_base_queue;
- int i;
-
- netif_tx_stop_all_queues(vdev);
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
- adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
- }
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
- adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
- }
-
-
- return 0;
-}
-
static int ixgbe_fwd_ring_up(struct net_device *vdev,
struct ixgbe_fwd_adapter *accel)
{
struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase, txbase, queues;
- int i, baseq, err = 0;
+ int i, baseq, err;
- if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ if (!test_bit(accel->pool, adapter->fwd_bitmask))
return 0;
baseq = accel->pool * adapter->num_rx_queues_per_pool;
- netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
- baseq, baseq + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
+ baseq, baseq + adapter->num_rx_queues_per_pool);
accel->netdev = vdev;
- accel->rx_base_queue = rxbase = baseq;
- accel->tx_base_queue = txbase = baseq;
+ accel->rx_base_queue = baseq;
+ accel->tx_base_queue = baseq;
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+ adapter->rx_ring[baseq + i]->netdev = vdev;
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->rx_ring[rxbase + i]->netdev = vdev;
- adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
- ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
- }
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+ wmb();
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->netdev = vdev;
- adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+ /* ixgbe_add_mac_filter will return an index if it succeeds, so we
+ * need to only treat it as an error value if it is negative.
+ */
+ err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
+ VMDQ_P(accel->pool));
+ if (err >= 0) {
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return 0;
}
- queues = min_t(unsigned int,
- adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
- err = netif_set_real_num_tx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- err = netif_set_real_num_rx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- if (is_valid_ether_addr(vdev->dev_addr))
- ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ adapter->rx_ring[baseq + i]->netdev = NULL;
- ixgbe_fwd_psrtype(accel);
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
- return err;
-fwd_queue_err:
- ixgbe_fwd_ring_down(vdev, accel);
return err;
}
@@ -5480,6 +5435,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
+ ixgbe_ipsec_restore(adapter);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
@@ -5917,21 +5873,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
-static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv) {
- netif_tx_stop_all_queues(upper);
- netif_carrier_off(upper);
- netif_tx_disable(upper);
- }
- }
-
- return 0;
-}
-
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -5961,10 +5902,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- /* disable any upper devices */
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_disable_macvlan, NULL);
-
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
@@ -6119,6 +6056,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
+ * @ii: pointer to ixgbe_info for device
*
* ixgbe_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
@@ -6153,6 +6091,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
@@ -6302,7 +6241,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
}
/* PF holds first pool slot */
- set_bit(0, &adapter->fwd_bitmask);
+ set_bit(0, adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -6402,6 +6341,7 @@ err_setup_tx:
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: pointer to ixgbe_adapter
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
@@ -6444,6 +6384,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+ rx_ring->queue_index) < 0)
+ goto err;
+
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
@@ -6541,6 +6486,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
ixgbe_clean_rx_ring(rx_ring);
rx_ring->xdp_prog = NULL;
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -6646,20 +6592,12 @@ int ixgbe_open(struct net_device *netdev)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- if (adapter->num_rx_pools > 1)
- queues = adapter->num_rx_queues_per_pool;
- else
- queues = adapter->num_tx_queues;
-
+ queues = adapter->num_tx_queues;
err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
- if (adapter->num_rx_pools > 1 &&
- adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
- queues = IXGBE_MAX_L2A_QUEUES;
- else
- queues = adapter->num_rx_queues;
+ queues = adapter->num_rx_queues;
err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
@@ -6783,7 +6721,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl, fctrl;
+ u32 ctrl;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
@@ -6808,18 +6746,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
hw->mac.ops.stop_link_on_d3(hw);
if (wufc) {
+ u32 fctrl;
+
ixgbe_set_rx_mode(netdev);
/* enable the optics for 82599 SFP+ fiber as we can WoL */
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
- /* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & IXGBE_WUFC_MC) {
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- }
+ /* enable the reception of multicast packets */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
ctrl |= IXGBE_CTRL_GIO_DIS;
@@ -7216,7 +7154,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
* @adapter: pointer to the device adapter structure
- * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -7273,18 +7210,6 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
#endif
}
-static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_wake_all_queues(upper);
- }
-
- return 0;
-}
-
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -7338,6 +7263,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case IXGBE_LINK_SPEED_10GB_FULL:
speed_str = "10 Gbps";
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ speed_str = "5 Gbps";
+ break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
speed_str = "2.5 Gbps";
break;
@@ -7365,12 +7293,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
/* enable transmits */
netif_tx_wake_all_queues(adapter->netdev);
- /* enable any upper devices */
- rtnl_lock();
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_enable_macvlan, NULL);
- rtnl_unlock();
-
/* update the default user priority for VFs */
ixgbe_update_default_up(adapter);
@@ -7655,6 +7577,7 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 cap_speed;
u32 speed;
bool autoneg = false;
@@ -7667,16 +7590,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
- hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+ hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
- /* setup the highest link when no autoneg */
- if (!autoneg) {
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- speed = IXGBE_LINK_SPEED_10GB_FULL;
- }
- }
+ /* advertise highest capable link speed */
+ if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL);
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
@@ -7688,7 +7609,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list structure
**/
static void ixgbe_service_timer(struct timer_list *t)
{
@@ -7888,10 +7809,12 @@ static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
}
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first)
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
+ u32 fceof_saidx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -7932,7 +7855,12 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
+ if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
+ fceof_saidx |= itd->sa_idx;
+ type_tucmd |= itd->flags | itd->trailer_len;
+ }
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
}
#define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -7975,11 +7903,16 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
IXGBE_TX_FLAGS_CSUM,
IXGBE_ADVTXD_POPTS_TXSM);
- /* enble IPv4 checksum for TSO */
+ /* enable IPv4 checksum for TSO */
olinfo_status |= IXGBE_SET_FLAG(tx_flags,
IXGBE_TX_FLAGS_IPV4,
IXGBE_ADVTXD_POPTS_IXSM);
+ /* enable IPsec */
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_IPSEC,
+ IXGBE_ADVTXD_POPTS_IPSEC);
+
/*
* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
@@ -8332,14 +8265,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
- struct ixgbe_ring_feature *f;
int txq;
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *f;
#endif
- if (fwd_adapter)
- return skb->queue_mapping + fwd_adapter->tx_base_queue;
+ if (fwd_adapter) {
+ adapter = netdev_priv(dev);
+ txq = reciprocal_scale(skb_get_hash(skb),
+ adapter->num_rx_queues_per_pool);
+
+ return txq + fwd_adapter->tx_base_queue;
+ }
#ifdef IXGBE_FCOE
@@ -8438,6 +8376,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 tx_flags = 0;
unsigned short f;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
__be16 protocol = skb->protocol;
u8 hdr_len = 0;
@@ -8542,11 +8481,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
#endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_XFRM_OFFLOAD
+ if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+ goto out_drop;
+#endif
tso = ixgbe_tso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
else if (!tso)
- ixgbe_tx_csum(tx_ring, first);
+ ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
/* add the ATR filter if ATR is on */
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
@@ -8671,7 +8615,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
/**
* ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8695,7 +8639,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
/**
* ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8862,14 +8806,13 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
- * @netdev: net device to configure
+ * @dev: net device to configure
* @tc: number of traffic classes to enable
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8878,10 +8821,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
return -EINVAL;
- pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
- if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
- return -EBUSY;
-
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -8898,6 +8837,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
+ adapter->hw_tcs = tc;
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
@@ -8907,10 +8847,19 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
} else {
netdev_reset_tc(dev);
+ /* To support macvlan offload we have to use num_tc to
+ * restrict the queues that can be used by the device.
+ * By doing this we can avoid reporting a false number of
+ * queues.
+ */
+ if (!tc && adapter->num_rx_pools > 1)
+ netdev_set_num_tc(dev, 1);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->hw_tcs = tc;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -9044,6 +8993,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data)
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
u8 *queue, u64 *action)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int num_vfs = adapter->num_vfs, vf;
struct upper_walk_data data;
struct net_device *upper;
@@ -9052,11 +9002,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
for (vf = 0; vf < num_vfs; ++vf) {
upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
if (upper->ifindex == ifindex) {
- if (adapter->num_rx_pools > 1)
- *queue = vf * 2;
- else
- *queue = vf * adapter->num_rx_queues_per_pool;
-
+ *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
*action = vf + 1;
*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
return 0;
@@ -9101,9 +9047,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
/* Redirect to a VF or a offloaded macvlan */
if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *dev = tcf_mirred_dev(a);
- err = handle_redirect_action(adapter, ifindex, queue,
+ if (!dev)
+ return -EINVAL;
+ err = handle_redirect_action(adapter, dev->ifindex, queue,
action);
if (err == 0)
return err;
@@ -9362,9 +9310,6 @@ free_jump:
static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls_u32)
{
- if (cls_u32->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -9386,7 +9331,7 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct ixgbe_adapter *adapter = cb_priv;
- if (!tc_can_offload(adapter->netdev))
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -9444,7 +9389,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
rtnl_lock();
- ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+ ixgbe_setup_tc(netdev, adapter->hw_tcs);
rtnl_unlock();
}
@@ -9520,7 +9465,7 @@ static int ixgbe_set_features(struct net_device *netdev,
/* We cannot enable ATR if SR-IOV is enabled */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
/* We cannot enable ATR if we have 2 or more tcs */
- (netdev_get_num_tc(netdev) > 1) ||
+ (adapter->hw_tcs > 1) ||
/* We cannot enable ATR if RSS is disabled */
(adapter->ring_feature[RING_F_RSS].limit <= 1) ||
/* A sample rate of 0 indicates ATR disabled */
@@ -9695,8 +9640,8 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/**
* ixgbe_configure_bridge_mode - set various bridge modes
- * @adapter - the private structure
- * @mode - requested bridge mode
+ * @adapter: the private structure
+ * @mode: requested bridge mode
*
* Configure some settings required for various bridge modes.
**/
@@ -9821,6 +9766,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ int tcs = adapter->hw_tcs ? : 1;
unsigned int limit;
int pool, err;
@@ -9831,24 +9777,8 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
return ERR_PTR(-EINVAL);
-#ifdef CONFIG_RPS
- if (vdev->num_rx_queues != vdev->num_tx_queues) {
- netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
- vdev->name);
- return ERR_PTR(-EINVAL);
- }
-#endif
- /* Check for hardware restriction on number of rx/tx queues */
- if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
- vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
- netdev_info(pdev,
- "%s: Supports RX/TX Queue counts 1,2, and 4\n",
- pdev->name);
- return ERR_PTR(-EINVAL);
- }
-
if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
@@ -9856,53 +9786,68 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
- pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
- adapter->num_rx_pools++;
- set_bit(pool, &adapter->fwd_bitmask);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ set_bit(pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
- /* Force reinit of ring allocation with VMDQ enabled */
- err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- if (err)
- goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- if (netif_running(pdev)) {
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+
+ if (!err && netif_running(pdev))
err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
- }
- return fwd_adapter;
-fwd_add_err:
+ if (!err)
+ return fwd_adapter;
+
/* unwind counter and free adapter struct */
netdev_info(pdev,
"%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ clear_bit(pool, adapter->fwd_bitmask);
kfree(fwd_adapter);
return ERR_PTR(err);
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
- unsigned int limit;
+ struct ixgbe_fwd_adapter *accel = priv;
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int limit, i;
+
+ /* delete unicast filter associated with offloaded interface */
+ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
+ VMDQ_P(accel->pool));
+
+ /* disable ability to receive packets for this pool */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
- clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ /* Allow remaining Rx packets to get flushed out of the
+ * Rx FIFO before we drop the netdev for the ring.
+ */
+ usleep_range(10000, 20000);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
+ struct ixgbe_q_vector *qv = ring->q_vector;
+
+ /* Make sure we aren't processing any packets and clear
+ * netdev to shut down the ring.
+ */
+ if (netif_running(adapter->netdev))
+ napi_synchronize(&qv->napi);
+ ring->netdev = NULL;
+ }
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
/* go back to full RSS if we're done with our VMQs */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
@@ -9914,13 +9859,13 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
adapter->ring_feature[RING_F_RSS].limit = rss;
}
- ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
- fwd_adapter->pool, adapter->num_rx_pools,
- fwd_adapter->rx_base_queue,
- fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
- kfree(fwd_adapter);
+ ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
+ accel->pool, adapter->num_rx_pools,
+ accel->rx_base_queue,
+ accel->rx_base_queue +
+ adapter->num_rx_queues_per_pool);
+ kfree(accel);
}
#define IXGBE_MAX_MAC_HDR_LEN 127
@@ -9954,6 +9899,12 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
features &= ~NETIF_F_TSO;
+#ifdef CONFIG_XFRM_OFFLOAD
+ /* IPsec offload doesn't get along well with others *yet* */
+ if (skb->sp)
+ features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
+#endif
+
return features;
}
@@ -9987,7 +9938,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* If transitioning XDP modes reconfigure rings */
if (!!prog != !!old_prog) {
- int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
rcu_assign_pointer(adapter->xdp_prog, old_prog);
@@ -10164,7 +10115,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* ixgbe_wol_supported - Check whether device supports WoL
* @adapter: the adapter private structure
* @device_id: the device ID
- * @subdev_id: the subsystem device ID
+ * @subdevice_id: the subsystem device ID
*
* This function is used by probe and ethtool to determine
* which devices have WoL support
@@ -10233,6 +10184,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version - Set FW version
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine which FW version
+ * string to display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ ixgbe_get_orom_version(hw, &nvm_ver);
+
+ if (nvm_ver.or_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
+ nvm_ver.or_build, nvm_ver.or_patch);
+ return;
+ }
+
+ /* Set ETrack ID format */
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", nvm_ver.etk_id);
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10508,6 +10494,7 @@ skip_sriov:
NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
+ ixgbe_init_ipsec_offload(adapter);
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
@@ -10568,8 +10555,7 @@ skip_sriov:
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* save off EEPROM version number */
- hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
- hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+ ixgbe_set_fw_version(adapter);
/* pick up the PCI bus settings for reporting later */
if (ixgbe_pcie_from_parent(hw))
@@ -10744,6 +10730,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
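The ixgbe_main.c hunks above replace the single unsigned long fwd_bitmask with a proper bitmap sized by num_rx_pools, and track the active macvlan pools with find_first_zero_bit()/set_bit()/find_last_bit(). A minimal userspace sketch of that allocate/scan/free pattern follows; the bit helpers are simplified stand-ins for the kernel's bitmap API, and MAX_POOLS is a made-up bound:

#include <stdio.h>

#define MAX_POOLS 64

static unsigned long pool_bitmap; /* stand-in for adapter->fwd_bitmask */

/* lowest clear bit below 'limit', -1 if none (models find_first_zero_bit) */
static int first_zero_bit(unsigned long map, int limit)
{
	for (int i = 0; i < limit; i++)
		if (!(map & (1UL << i)))
			return i;
	return -1;
}

/* highest set bit below 'limit', -1 if none (models find_last_bit) */
static int last_bit(unsigned long map, int limit)
{
	for (int i = limit - 1; i >= 0; i--)
		if (map & (1UL << i))
			return i;
	return -1;
}

int main(void)
{
	pool_bitmap |= 1UL << 0;	/* pool 0 is the default pool */

	int pool = first_zero_bit(pool_bitmap, MAX_POOLS);
	pool_bitmap |= 1UL << pool;	/* claim it, like set_bit() */

	int limit = last_bit(pool_bitmap, MAX_POOLS);
	printf("allocated pool %d, VMDq limit %d\n", pool, limit + 1);

	pool_bitmap &= ~(1UL << pool);	/* release on teardown, like clear_bit() */
	return 0;
}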
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 654a402f0e9e..91bde90f9265 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -378,7 +378,7 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
/**
* ixgbe_get_phy_type_from_id - Get the phy type
- * @hw: pointer to hardware structure
+ * @phy_id: hardware phy id
*
**/
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
@@ -489,6 +489,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
@@ -564,6 +565,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -763,6 +765,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: unused
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -861,6 +864,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @link_up: status of link
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
@@ -1667,7 +1672,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
@@ -1714,6 +1719,7 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
* @lock: true if to take and release semaphore
*
@@ -1804,6 +1810,7 @@ fail:
* ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1820,6 +1827,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1836,6 +1844,7 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
* @lock: true if to take and release semaphore
*
@@ -1904,6 +1913,7 @@ fail:
* ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -1920,6 +1930,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
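All of the ixgbe_phy.c hunks above are kernel-doc repairs: every parameter in the prototype needs a matching @name: line, and the tag syntax is "@name:", not "@name -". A hypothetical comment showing the shape these fixes converge on (the function and its parameters are invented for illustration):

/**
 * example_read_reg - read one device register
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of the register to read
 * @device_type: 5 bit device type
 * @data: where the value read is stored
 *
 * A missing or misspelled @name: line makes scripts/kernel-doc warn,
 * which is what most of these hunks are silencing.
 */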
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ae312c45696a..f6cc9166082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -166,7 +166,7 @@
/**
* ixgbe_ptp_setup_sdp_x540
- * @hw: the hardware private structure
+ * @adapter: private adapter structure
*
* this function enables or disables the clock out feature on SDP0 for
* the X540 device. It will create a 1 second periodic output that can
@@ -299,7 +299,7 @@ static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
* ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp
* @adapter: private adapter structure
* @hwtstamp: stack timestamp structure
- * @systim: unsigned 64bit system time value
+ * @timestamp: unsigned 64bit system time value
*
* We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
* which can be used by the stack's ptp functions.
@@ -1015,7 +1015,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_set_ts_config - user entry point for timestamp mode
* @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
@@ -1338,7 +1338,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_suspend - stop PTP work items
- * @ adapter: pointer to adapter struct
+ * @adapter: pointer to adapter struct
*
* this function suspends PTP activity, and prevents more PTP work from being
* generated, but does not destroy the PTP clock device.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 112d24c6c9ce..27a70a52f3c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -78,12 +78,9 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int i;
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
-
/* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
- if (!adapter->ring_feature[RING_F_VMDQ].limit)
- adapter->ring_feature[RING_F_VMDQ].limit = 1;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED;
/* Allocate memory for per VF control structures */
adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
@@ -227,9 +224,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 gpie;
- u32 vmdctl;
int rss;
/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +265,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
pci_disable_sriov(adapter->pdev);
#endif
- /* turn off device IOV mode */
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* set default pool back to 0 */
- vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- IXGBE_WRITE_FLUSH(hw);
-
/* Disable VMDq flag so device will be set in VM mode */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +287,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
- int err = 0;
- u8 num_tc;
- int i;
int pre_existing_vfs = pci_num_vf(dev);
+ int err = 0, num_rx_pools, i, limit;
+ u8 num_tc;
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
err = ixgbe_disable_sriov(adapter);
@@ -330,23 +311,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
- num_tc = netdev_get_num_tc(adapter->netdev);
-
- if (num_tc > 4) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
- return -EPERM;
- }
- } else if ((num_tc > 1) && (num_tc <= 4)) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
- return -EPERM;
- }
- } else {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
- return -EPERM;
- }
+ num_tc = adapter->hw_tcs;
+ num_rx_pools = adapter->num_rx_pools;
+ limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+ (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
+
+ if (num_vfs > (limit - num_rx_pools)) {
+ e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+ num_tc, num_rx_pools - 1, limit - num_rx_pools);
+ return -EPERM;
}
err = __ixgbe_enable_sriov(adapter, num_vfs);
@@ -378,13 +351,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
int err;
#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+ int prev_num_vf = pci_num_vf(dev);
#endif
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
- if (!err && current_flags != adapter->flags)
+ if (!err && (current_flags != adapter->flags ||
+ prev_num_vf != pci_num_vf(dev)))
ixgbe_sriov_reinit(adapter);
#endif
@@ -738,7 +713,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+ u8 num_tcs = adapter->hw_tcs;
/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -946,7 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
{
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -1034,7 +1009,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
struct net_device *dev = adapter->netdev;
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int default_tc = 0;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
/* verify the PF is supporting the correct APIs */
switch (adapter->vfinfo[vf].vf_api) {
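The ixgbe_sriov.c hunk above collapses three copy-pasted TC checks into one chained ternary that picks the VF cap for the current traffic-class count, then applies a single bounds test. A standalone sketch of the same logic; the numeric caps are stand-ins for illustration, not necessarily the driver's IXGBE_MAX_VFS_* values:

#include <stdio.h>

#define MAX_VFS_8TC 15	/* stand-in caps, one per TC configuration */
#define MAX_VFS_4TC 31
#define MAX_VFS_1TC 63

static int vf_limit(int num_tc)
{
	return (num_tc > 4) ? MAX_VFS_8TC :
	       (num_tc > 1) ? MAX_VFS_4TC : MAX_VFS_1TC;
}

int main(void)
{
	int num_tc = 4, num_rx_pools = 2, num_vfs = 30;
	int limit = vf_limit(num_tc);

	/* VFs and offloaded macvlan pools share the same pool space */
	if (num_vfs > limit - num_rx_pools)
		printf("rejected: only %d VFs allowed with %d TCs and %d macvlans\n",
		       limit - num_rx_pools, num_tc, num_rx_pools - 1);
	else
		printf("ok: %d VFs fit\n", num_vfs);
	return 0;
}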
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ffa0ee5cd0f5..ca45359686d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version string size */
+
+struct ixgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+};
+
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
@@ -2321,11 +2360,6 @@ enum {
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
@@ -2377,6 +2411,9 @@ enum {
#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 /* overlap ERR_PE */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 /* overlap ERR_OSE */
+#define IXGBE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000
#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
@@ -2398,6 +2435,7 @@ enum {
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -2464,13 +2502,6 @@ enum {
#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
-/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
-
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
@@ -2484,6 +2515,8 @@ enum {
IXGBE_RXDADV_ERR_LE | \
IXGBE_RXDADV_ERR_PE | \
IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL | \
+ IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH | \
IXGBE_RXDADV_ERR_USE)
/* Multicast bit mask */
@@ -2862,7 +2895,7 @@ union ixgbe_adv_rx_desc {
/* Context descriptors */
struct ixgbe_adv_tx_context_desc {
__le32 vlan_macip_lens;
- __le32 seqnum_seed;
+ __le32 fceof_saidx;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
@@ -2893,6 +2926,7 @@ struct ixgbe_adv_tx_context_desc {
IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2908,7 +2942,6 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */
#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
-#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
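The new NVM defines above describe how the version words are packed: a 16 bit low word, a 16 bit high word shifted by NVM_ETK_SHIFT, with NVM_ETK_VALID flagging a usable high word. One plausible way the 32 bit ETrack ID could be assembled from those words — this illustrates the mask/shift packing only, not the driver's exact routine:

#include <stdio.h>
#include <stdint.h>

#define NVM_ETK_SHIFT 16	/* high version word shift */
#define NVM_ETK_VALID 0x8000	/* high word carries a valid flag */

int main(void)
{
	/* pretend these were read at NVM_ETK_OFF_LOW / NVM_ETK_OFF_HI */
	uint16_t etk_lo = 0x1234, etk_hi = 0x8005;

	if (etk_hi & NVM_ETK_VALID) {
		/* strip the valid flag, then combine high and low words */
		uint32_t etk_id = ((uint32_t)(etk_hi & ~NVM_ETK_VALID)
				   << NVM_ETK_SHIFT) | etk_lo;
		printf("ETrack ID 0x%08x\n", (unsigned)etk_id); /* 0x00051234 */
	}
	return 0;
}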
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cb7da5f9c4da..f470d0204771 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -949,7 +949,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 length, bufsz, i, start;
u16 *local_buffer;
- bufsz = sizeof(buf) / sizeof(buf[0]);
+ bufsz = ARRAY_SIZE(buf);
/* Read a chunk at the pointer location */
if (!buffer) {
@@ -1642,10 +1642,12 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
}
/**
- * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
- * @hw: pointer to hardware structure
+ * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait_to_complete: unused
*
- * Configures the extern PHY and the integrated KR PHY for SFP support.
+ * Configures the external PHY and the integrated KR PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
@@ -1737,6 +1739,8 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
/**
* ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the integrated PHY for native SFP support.
*/
@@ -1784,6 +1788,8 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/**
* ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the integrated PHY for SFP support.
*/
@@ -1859,7 +1865,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait: true when waiting for completion is needed
*
* Setup internal/external PHY link speed based on link speed, then set
* external PHY auto advertised link speed.
@@ -1943,6 +1949,8 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
/**
* ixgbe_setup_sgmii - Set up link for sgmii
* @hw: pointer to hardware structure
+ * @speed: unused
+ * @autoneg_wait_to_complete: unused
*/
static s32
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
@@ -2014,6 +2022,8 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
/**
* ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
* @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait: true when waiting for completion is needed
*/
static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
@@ -3735,6 +3745,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* ixgbe_read_phy_reg_x550a - Reads specified PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
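The bufsz = ARRAY_SIZE(buf) change above, like the sizeof(msgbuf)/sizeof(u32) conversions later in vf.c, swaps a hand-rolled element count for the kernel's ARRAY_SIZE() macro. A minimal model of the macro; note the real kernel version additionally fails to compile when handed a pointer instead of an array, a check this simplified copy omits:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned short buf[32];
	unsigned int msgbuf[3];

	/* element counts stay correct even if the types change later */
	printf("%zu %zu\n", ARRAY_SIZE(buf), ARRAY_SIZE(msgbuf)); /* 32 3 */
	return 0;
}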
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index ff9d05f308ee..4400e49090b4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
IXGBEVF_NETDEV_STAT(multicast),
IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+ IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+ IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+ IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};
#define IXGBEVF_QUEUE_STATS_LEN ( \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 581f44bbd7b3..f6952425c87d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -62,7 +62,12 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct ixgbevf_stats {
@@ -79,6 +84,7 @@ struct ixgbevf_tx_queue_stats {
struct ixgbevf_rx_queue_stats {
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
+ u64 alloc_rx_page;
u64 csum_err;
};
@@ -260,6 +266,9 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#define IXGBEVF_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
/* board specific private data structure */
struct ixgbevf_adapter {
/* this field must be first, see ixgbevf_process_skb_fields */
@@ -287,8 +296,9 @@ struct ixgbevf_adapter {
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
int num_msix_vectors;
- u32 alloc_rx_page_failed;
- u32 alloc_rx_buff_failed;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 alloc_rx_page;
struct msix_entry *msix_entries;
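In the ixgbevf_rx_buffer change above, page_offset is widened to __u32 only when a 16 bit offset could overflow (64 KiB or larger pages) or on 64 bit builds, where struct padding would swallow the saving anyway — that second half is an inference from the #if condition, not something the patch states. A sketch of the same compile-time field sizing, with PAGE_SIZE_MODEL as a stand-in:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define PAGE_SIZE_MODEL 4096	/* stand-in for the kernel's PAGE_SIZE */

/* mirrors the (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) test */
#if (ULONG_MAX > 0xFFFFFFFFUL) || (PAGE_SIZE_MODEL >= 65536)
typedef uint32_t page_off_t;
#else
typedef uint16_t page_off_t;
#endif

struct rx_buffer_model {
	uint64_t dma;		/* stand-in for dma_addr_t */
	void *page;
	page_off_t page_offset;
	uint16_t pagecnt_bias;
};

int main(void)
{
	printf("per-buffer bookkeeping: %zu bytes\n",
	       sizeof(struct rx_buffer_model));
	return 0;
}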
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f4a69134ade..9b3d43d28106 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
}
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* tx_buffer must be completely set up in the transmit path */
-}
-
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
return ring->stats.packets;
@@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* unmap remaining buffers */
@@ -554,7 +531,6 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
* ixgbevf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -596,8 +572,8 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -605,13 +581,15 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_page(page);
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ bi->pagecnt_bias = 1;
+ rx_ring->rx_stats.alloc_rx_page++;
return true;
}
@@ -640,6 +618,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if pkt_addr didn't change
* because each write-back erases this info.
*/
@@ -654,8 +638,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
i -= rx_ring->count;
}
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->read.hdr_addr = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -742,12 +726,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
new_buff->page_offset = old_buff->page_offset;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset,
- IXGBEVF_RX_BUFSZ,
- DMA_FROM_DEVICE);
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool ixgbevf_page_is_reserved(struct page *page)
@@ -755,6 +734,45 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
+ struct page *page,
+ const unsigned int truesize)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbevf_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_ref_count(page) != pagecnt_bias))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+ return false;
+
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
/**
* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -772,12 +790,12 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
**/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
+ u16 size,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
@@ -796,7 +814,6 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
return true;
/* this page cannot be reused so discard it */
- put_page(page);
return false;
}
@@ -816,32 +833,7 @@ add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
(unsigned long)va & ~PAGE_MASK, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(ixgbevf_page_is_reserved(page)))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
- return false;
-
-#endif
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- page_ref_inc(page);
-
- return true;
+ return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
}
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
@@ -850,11 +842,19 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
{
struct ixgbevf_rx_buffer *rx_buffer;
struct page *page;
+ u16 size = le16_to_cpu(rx_desc->wb.upper.length);
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
@@ -880,21 +880,18 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
prefetchw(skb->data);
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- IXGBEVF_RX_BUFSZ,
- DMA_FROM_DEVICE);
-
/* pull page into skb */
- if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+ __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
@@ -931,7 +928,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ if (!rx_desc->wb.upper.length)
break;
/* This memory barrier is needed to keep us from reading
@@ -944,8 +941,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
+ }
cleaned_count++;
@@ -1554,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (1u << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -1722,6 +1725,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
@@ -1750,6 +1754,14 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct ixgbevf_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IXGBEVF_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
@@ -1896,10 +1908,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
unsigned int flags = netdev->flags;
int xcast_mode;
- xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
- (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
- IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
-
/* request the most inclusive mode we need */
if (flags & IFF_PROMISC)
xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
@@ -2108,9 +2116,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- unsigned int i;
+ u16 i = rx_ring->next_to_clean;
/* Free Rx ring sk_buff */
if (rx_ring->skb) {
@@ -2118,29 +2124,39 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
rx_ring->skb = NULL;
}
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
-
/* Free all the Rx ring pages */
- for (i = 0; i < rx_ring->count; i++) {
+ while (i != rx_ring->next_to_alloc) {
struct ixgbevf_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_page(rx_buffer->page);
- rx_buffer->page = NULL;
- }
- size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ rx_buffer->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
}
/**
@@ -2149,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
**/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned long size;
- unsigned int i;
+ u16 i = tx_ring->next_to_clean;
+ struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (!tx_ring->tx_buffer_info)
- return;
+ while (i != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
}
- size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
- memset(tx_ring->desc, 0, tx_ring->size);
}
/**
@@ -2717,6 +2767,8 @@ out:
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
int i;
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
@@ -2737,15 +2789,23 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfmprc);
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->hw_csum_rx_error +=
- adapter->rx_ring[i]->hw_csum_rx_error;
- adapter->rx_ring[i]->hw_csum_rx_error = 0;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+ hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
}
+
+ adapter->hw_csum_rx_error = hw_csum_rx_error;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ adapter->alloc_rx_page = alloc_rx_page;
}
/**
* ixgbevf_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list struct
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
@@ -2888,7 +2948,7 @@ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_watchdog_subtask - worker thread to bring link up
- * @work: pointer to work_struct containing our data
+ * @adapter: board private structure
**/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
@@ -2985,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -3045,7 +3105,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -3487,34 +3547,37 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
const u8 hdr_len)
{
- dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
- __le32 cmd_type;
+ __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
- cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
+ tx_buffer = first;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
@@ -3525,12 +3588,12 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
}
if (likely(!data_len))
@@ -3544,23 +3607,15 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
-
- frag++;
}
/* write last descriptor with RS and EOP bits */
@@ -3594,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
return;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
tx_ring->next_to_use = i;
}
@@ -4368,6 +4437,7 @@ static void __exit ixgbevf_exit_module(void)
/**
* ixgbevf_get_hw_dev_name - return device name string
* used by hardware layer to print debugging information
+ * @hw: pointer to private hardware struct
**/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
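The pagecnt_bias rework above (ixgbevf_can_reuse_rx_page and friends) replaces a page_ref_inc() per recycled buffer with one bulk page_ref_add(page, USHRT_MAX) plus a driver-local countdown; the page is reusable exactly when the shared refcount matches the local bias. A toy single-page model of that accounting — plain ints stand in for the atomic page refcount, and the stack's eventual put_page() is compressed into the loop:

#include <stdio.h>
#include <limits.h>

static int page_refcount;		/* models the shared atomic page count */
static unsigned short pagecnt_bias;	/* references the driver still owns */

static void page_alloc_model(void)
{
	page_refcount = 1;	/* a fresh page starts with one reference */
	pagecnt_bias = 1;
}

/* returns 1 if the buffer's half-page may be recycled */
static int can_reuse_page(void)
{
	unsigned short bias = pagecnt_bias--;

	/* sole owner iff the shared count equals what we think we hold */
	if (page_refcount != bias)
		return 0;

	/* nearly drained: restock references in one bulk update instead
	 * of one atomic increment per received buffer */
	if (bias == 1) {
		page_refcount += USHRT_MAX;	/* models page_ref_add() */
		pagecnt_bias = USHRT_MAX;
	}
	return 1;
}

int main(void)
{
	page_alloc_model();
	for (int i = 0; i < 4; i++) {
		printf("rx %d: reuse=%d bias=%u refcount=%d\n",
		       i, can_reuse_page(), pagecnt_bias, page_refcount);
		page_refcount--;	/* stack's put_page() when skb is freed */
	}
	return 0;
}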
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c25006ce9af..38d3a327c1bc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -146,6 +146,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/**
* Hyper-V variant; the VF/PF communication is through the PCI
* config space.
+ * @hw: pointer to private hardware struct
*/
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
@@ -285,7 +286,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
ether_addr_copy(msg_addr, addr);
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -303,7 +304,7 @@ static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
- * @adapter: pointer to the port handle
+ * @hw: pointer to hardware structure
* @reta: buffer to fill with RETA contents.
* @num_rx_queues: Number of Rx queues configured for this port
*
@@ -455,8 +456,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
ether_addr_copy(msg_addr, addr);
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
-
+ ARRAY_SIZE(msgbuf));
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
@@ -536,6 +536,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @netdev: unused
*/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
@@ -571,7 +573,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
msgbuf[1] = xcast_mode;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (err)
return err;
@@ -584,6 +586,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
*/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
@@ -609,7 +613,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (err)
goto mbx_err;
@@ -626,6 +630,10 @@ mbx_err:
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
*/
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
@@ -655,7 +663,7 @@ static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait_to_complete: unused
*
* Reads the links register to determine if link is up and the current speed
**/
@@ -740,6 +748,10 @@ out:
/**
* Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to private hardware struct
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
*/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -813,7 +825,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
msgbuf[1] = max_size;
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
@@ -859,8 +871,7 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[1] = api;
msg[2] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg,
- sizeof(msg) / sizeof(u32));
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -911,8 +922,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
msg[0] = IXGBE_VF_GET_QUEUE;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg,
- sizeof(msg) / sizeof(u32));
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index da6fb825afea..ebe5c9148935 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,7 +60,7 @@ config MVNETA
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
- select FIXED_PHY
+ select PHYLINK
---help---
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a539263cd79c..25e9a551cc8c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,7 +28,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -189,6 +189,7 @@
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
@@ -204,13 +205,19 @@
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
+#define MVNETA_GMAC_AN_COMPLETE BIT(11)
+#define MVNETA_GMAC_SYNC_OK BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
+#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
+#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
+#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
+#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
@@ -237,6 +244,12 @@
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_1 0x2cc4
+#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_2 0x2cc8
+#define MVNETA_LPI_STATUS 0x2ccc
+
#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
@@ -313,6 +326,11 @@
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
+enum {
+ ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_MAX_STATS,
+};
+
struct mvneta_statistic {
unsigned short offset;
unsigned short type;
@@ -321,6 +339,7 @@ struct mvneta_statistic {
#define T_REG_32 32
#define T_REG_64 64
+#define T_SW 1
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
@@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x304c, T_REG_32, "broadcast_frames_sent", },
{ 0x3054, T_REG_32, "fc_sent", },
{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
+ { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
};
struct mvneta_pcpu_stats {
@@ -407,20 +427,20 @@ struct mvneta_port {
u16 tx_ring_size;
u16 rx_ring_size;
- struct mii_bus *mii_bus;
phy_interface_t phy_interface;
- struct device_node *phy_node;
- unsigned int link;
- unsigned int duplex;
- unsigned int speed;
+ struct device_node *dn;
unsigned int tx_csum_limit;
- unsigned int use_inband_status:1;
+ struct phylink *phylink;
struct mvneta_bm *bm_priv;
struct mvneta_bm_pool *pool_long;
struct mvneta_bm_pool *pool_short;
int bm_win_id;
+ bool eee_enabled;
+ bool eee_active;
+ bool tx_lpi_enabled;
+
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -1214,10 +1234,6 @@ static void mvneta_port_disable(struct mvneta_port *pp)
val &= ~MVNETA_GMAC0_PORT_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
- pp->link = 0;
- pp->duplex = -1;
- pp->speed = 0;
-
udelay(200);
}
@@ -1277,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
-static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- if (enable) {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_AN_FLOW_CTRL_EN);
- val |= MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- } else {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN);
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- }
-}
-
static void mvneta_percpu_unmask_interrupt(void *arg)
{
struct mvneta_port *pp = arg;
@@ -1467,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
val &= ~MVNETA_PHY_POLLING_ENABLE;
mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
- mvneta_set_autoneg(pp, pp->use_inband_status);
mvneta_set_ucast_table(pp, -1);
mvneta_set_special_mcast_table(pp, -1);
mvneta_set_other_mcast_table(pp, -1);
@@ -2692,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mvneta_fixed_link_update(struct mvneta_port *pp,
- struct phy_device *phy)
+static void mvneta_link_change(struct mvneta_port *pp)
{
- struct fixed_phy_status status;
- struct fixed_phy_status changed = {};
u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
- if (gmac_stat & MVNETA_GMAC_SPEED_1000)
- status.speed = SPEED_1000;
- else if (gmac_stat & MVNETA_GMAC_SPEED_100)
- status.speed = SPEED_100;
- else
- status.speed = SPEED_10;
- status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
- changed.link = 1;
- changed.speed = 1;
- changed.duplex = 1;
- fixed_phy_update_state(phy, &status, &changed);
- return 0;
+ phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler
@@ -2727,7 +2689,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx;
int rx_queue;
struct mvneta_port *pp = netdev_priv(napi->dev);
- struct net_device *ndev = pp->dev;
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
@@ -2741,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
- if (pp->use_inband_status && (cause_misc &
- (MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
- mvneta_fixed_link_update(pp, ndev->phydev);
- }
+
+ if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE))
+ mvneta_link_change(pp);
}
/* Release Tx descriptors */
@@ -3060,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
- struct net_device *ndev = pp->dev;
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3085,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
- phy_start(ndev->phydev);
+ phylink_start(pp->phylink);
netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
unsigned int cpu;
- struct net_device *ndev = pp->dev;
- phy_stop(ndev->phydev);
+ phylink_stop(pp->phylink);
if (!pp->neta_armada3700) {
for_each_online_cpu(cpu) {
@@ -3251,103 +3207,260 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_adjust_link(struct net_device *ndev)
+static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+
+ /* Asymmetric pause is unsupported */
+ phylink_set(mask, Pause);
+ /* Half-duplex at speeds higher than 100Mbit is unsupported */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (!phy_interface_mode_is_8023z(state->interface)) {
+ /* 10M and 100M are only supported in non-802.3z mode */
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int mvneta_mac_link_state(struct net_device *ndev,
+ struct phylink_link_state *state)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
- int status_change = 0;
+ u32 gmac_stat;
+
+ gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- if (phydev->link) {
- if ((pp->speed != phydev->speed) ||
- (pp->duplex != phydev->duplex)) {
- u32 val;
+ if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+ state->speed = SPEED_1000;
+ else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+ state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
+ state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+ state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
+
+ state->pause = 0;
+ if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_RX;
+ if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_TX;
+
+ return 1;
+}
- if (phydev->duplex)
- val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+static void mvneta_mac_an_restart(struct net_device *ndev)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- if (phydev->speed == SPEED_1000)
- val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
+}
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+ new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
+ new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
+ MVNETA_GMAC2_PORT_RESET);
+ new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_INBAND_RESTART_AN |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
+ MVNETA_GMAC_CONFIG_FLOW_CTRL |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+ MVNETA_GMAC_AN_DUPLEX_EN);
- pp->duplex = phydev->duplex;
- pp->speed = phydev->speed;
- }
+ /* Even though it might look weird, when we're configured in
+ * SGMII or QSGMII mode, the RGMII bit needs to be set.
+ */
+ new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
+
+ if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
+
+ if (phylink_test(state->advertising, Pause))
+ new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ if (state->pause & MLO_PAUSE_TXRX_MASK)
+ new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ if (!phylink_autoneg_inband(mode)) {
+ /* Phy or fixed speed */
+ if (state->duplex)
+ new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (state->speed == SPEED_1000)
+ new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else if (state->speed == SPEED_100)
+ new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the state from the PHY */
+ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+ new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z negotiation - only 1000base-X */
+ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
+ new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ /* The MAC only supports FD mode */
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (state->pause & MLO_PAUSE_AN && state->an_enabled)
+ new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
}
- if (phydev->link != pp->link) {
- if (!phydev->link) {
- pp->duplex = -1;
- pp->speed = 0;
- }
+ /* Armada 370 documentation says we can only change the port mode
+ * and in-band enable when the link is down, so force it down
+ * while making these changes. We also do this for GMAC_CTRL2 */
+ if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
+ (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
+ (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
+ MVNETA_GMAC_FORCE_LINK_DOWN);
+ }
- pp->link = phydev->link;
- status_change = 1;
+ if (new_ctrl0 != gmac_ctrl0)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
+ if (new_ctrl2 != gmac_ctrl2)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+ if (new_clk != gmac_clk)
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
+ if (new_an != gmac_an)
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
+
+ if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
+ while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+ MVNETA_GMAC2_PORT_RESET) != 0)
+ continue;
}
+}
- if (status_change) {
- if (phydev->link) {
- if (!pp->use_inband_status) {
- u32 val = mvreg_read(pp,
- MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
- val |= MVNETA_GMAC_FORCE_LINK_PASS;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- val);
- }
- mvneta_port_up(pp);
- } else {
- if (!pp->use_inband_status) {
- u32 val = mvreg_read(pp,
- MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
- val |= MVNETA_GMAC_FORCE_LINK_DOWN;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- val);
- }
- mvneta_port_down(pp);
- }
- phy_print_status(phydev);
+static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
+{
+ u32 lpi_ctl1;
+
+ lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ if (enable)
+ lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
+ else
+ lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
+}
+
+static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ mvneta_port_down(pp);
+
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
+
+ pp->eee_active = false;
+ mvneta_set_eee(pp, false);
}
-static int mvneta_mdio_probe(struct mvneta_port *pp)
+static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+ struct phy_device *phy)
{
- struct phy_device *phy_dev;
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
- phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
- pp->phy_interface);
- if (!phy_dev) {
- netdev_err(pp->dev, "could not find the PHY\n");
- return -ENODEV;
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ val |= MVNETA_GMAC_FORCE_LINK_PASS;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
- phy_ethtool_get_wol(phy_dev, &wol);
- device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+ mvneta_port_up(pp);
- phy_dev->supported &= PHY_GBIT_FEATURES;
- phy_dev->advertising = phy_dev->supported;
+ if (phy && pp->eee_enabled) {
+ pp->eee_active = phy_init_eee(phy, 0) >= 0;
+ mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ }
+}
- pp->link = 0;
- pp->duplex = 0;
- pp->speed = 0;
+static const struct phylink_mac_ops mvneta_phylink_ops = {
+ .validate = mvneta_validate,
+ .mac_link_state = mvneta_mac_link_state,
+ .mac_an_restart = mvneta_mac_an_restart,
+ .mac_config = mvneta_mac_config,
+ .mac_link_down = mvneta_mac_link_down,
+ .mac_link_up = mvneta_mac_link_up,
+};
- return 0;
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
+
+ if (err)
+ netdev_err(pp->dev, "could not attach PHY: %d\n", err);
+
+ phylink_ethtool_get_wol(pp->phylink, &wol);
+ device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
+ return err;
}
static void mvneta_mdio_remove(struct mvneta_port *pp)
{
- struct net_device *ndev = pp->dev;
-
- phy_disconnect(ndev->phydev);
+ phylink_disconnect_phy(pp->phylink);
}
/* Electing a CPU must be done in an atomic way: it should be done
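The large hunk above is the core of the phylink conversion: the old mvneta_adjust_link() callback is replaced by six phylink MAC operations, with phylink owning link policy and the driver only translating state into GMAC register writes. A sketch of the open path this sets up, condensed from mvneta_mdio_probe() and mvneta_start_dev() above (error handling trimmed; sketch_open is an illustrative name):

	static int sketch_open(struct net_device *dev)
	{
		struct mvneta_port *pp = netdev_priv(dev);
		int err;

		/* phylink binds either a real PHY or an in-band link */
		err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
		if (err)
			return err;

		phylink_start(pp->phylink);	/* ends up in .mac_config() */
		netif_tx_start_all_queues(dev);
		return 0;
	}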
@@ -3455,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
return 0;
@@ -3497,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
return 0;
}
@@ -3626,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- if (!dev->phydev)
- return -ENOTSUPP;
+ struct mvneta_port *pp = netdev_priv(dev);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
/* Ethtool methods */
@@ -3640,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (!phydev)
- return -ENODEV;
-
- if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
- u32 val;
-
- mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
-
- if (cmd->base.autoneg == AUTONEG_DISABLE) {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
-
- if (phydev->duplex)
- val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
- if (phydev->speed == SPEED_1000)
- val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ return phylink_ethtool_ksettings_set(pp->phylink, cmd);
+}
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
- }
+/* Get link ksettings for ethtools */
+static int
+mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
- pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
- netdev_info(pp->dev, "autoneg status set to %i\n",
- pp->use_inband_status);
+ return phylink_ethtool_ksettings_get(pp->phylink, cmd);
+}
- if (netif_running(ndev)) {
- mvneta_port_down(pp);
- mvneta_port_up(pp);
- }
- }
+static int mvneta_ethtool_nway_reset(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
- return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+ return phylink_ethtool_nway_reset(pp->phylink);
}
/* Set interrupt coalescing for ethtools */
@@ -3769,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return 0;
}
+static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(pp->phylink, pause);
+}
+
+static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(pp->phylink, pause);
+}
+
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
@@ -3785,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
- u32 high, low, val;
- u64 val64;
+ u32 high, low;
+ u64 val;
int i;
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
+ val = 0;
+
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
- pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
- val64 = (u64)high << 32 | low;
- pp->ethtool_stats[i] += val64;
+ val = (u64)high << 32 | low;
+ break;
+ case T_SW:
+ switch (s->offset) {
+ case ETHTOOL_STAT_EEE_WAKEUP:
+ val = phylink_get_eee_err(pp->phylink);
+ break;
+ }
break;
}
+
+ pp->ethtool_stats[i] += val;
}
}
@@ -3939,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
static void mvneta_ethtool_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct mvneta_port *pp = netdev_priv(dev);
- if (dev->phydev)
- phy_ethtool_get_wol(dev->phydev, wol);
+ phylink_ethtool_get_wol(pp->phylink, wol);
}
static int mvneta_ethtool_set_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
+ struct mvneta_port *pp = netdev_priv(dev);
int ret;
- if (!dev->phydev)
- return -EOPNOTSUPP;
-
- ret = phy_ethtool_set_wol(dev->phydev, wol);
+ ret = phylink_ethtool_set_wol(pp->phylink, wol);
if (!ret)
device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
return ret;
}
+static int mvneta_ethtool_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_get_module_info(pp->phylink, modinfo);
+}
+
+static int mvneta_ethtool_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *buf)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf);
+}
+
+static int mvneta_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+
+ eee->eee_enabled = pp->eee_enabled;
+ eee->eee_active = pp->eee_active;
+ eee->tx_lpi_enabled = pp->tx_lpi_enabled;
+ eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
+
+ return phylink_ethtool_get_eee(pp->phylink, eee);
+}
+
+static int mvneta_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ /* The Armada 37x documents do not give limits for this other than
+ * it being an 8-bit register. */
+ if (eee->tx_lpi_enabled &&
+ (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+ return -EINVAL;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi_ctl0 &= ~(0xff << 8);
+ lpi_ctl0 |= eee->tx_lpi_timer << 8;
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
+
+ pp->eee_enabled = eee->eee_enabled;
+ pp->tx_lpi_enabled = eee->tx_lpi_enabled;
+
+ mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
+
+ return phylink_ethtool_set_eee(pp->phylink, eee);
+}
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -3974,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = {
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
+ .nway_reset = mvneta_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
.get_coalesce = mvneta_ethtool_get_coalesce,
.get_drvinfo = mvneta_ethtool_get_drvinfo,
.get_ringparam = mvneta_ethtool_get_ringparam,
.set_ringparam = mvneta_ethtool_set_ringparam,
+ .get_pauseparam = mvneta_ethtool_get_pauseparam,
+ .set_pauseparam = mvneta_ethtool_set_pauseparam,
.get_strings = mvneta_ethtool_get_strings,
.get_ethtool_stats = mvneta_ethtool_get_stats,
.get_sset_count = mvneta_ethtool_get_sset_count,
@@ -3988,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.get_rxnfc = mvneta_ethtool_get_rxnfc,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_module_info = mvneta_ethtool_get_module_info,
+ .get_module_eeprom = mvneta_ethtool_get_module_eeprom,
+ .get_eee = mvneta_ethtool_get_eee,
+ .set_eee = mvneta_ethtool_set_eee,
};
/* Initialize hw */
@@ -4091,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
- u32 ctrl;
-
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- /* Even though it might look weird, when we're configured in
- * SGMII or QSGMII mode, the RGMII bit needs to be set.
- */
- switch(phy_mode) {
- case PHY_INTERFACE_MODE_QSGMII:
+ if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
- ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
- break;
- case PHY_INTERFACE_MODE_SGMII:
+ else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_1000BASEX)
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- ctrl |= MVNETA_GMAC2_PORT_RGMII;
- break;
- default:
+ else if (!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
- }
-
- /* Cancel Port Reset */
- ctrl &= ~MVNETA_GMAC2_PORT_RESET;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
-
- while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
- MVNETA_GMAC2_PORT_RESET) != 0)
- continue;
return 0;
}
@@ -4136,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev)
{
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
- struct device_node *phy_node;
struct device_node *bm_node;
struct mvneta_port *pp;
struct net_device *dev;
+ struct phylink *phylink;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
- const char *managed;
int tx_csum_limit;
int phy_mode;
int err;
@@ -4159,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_netdev;
}
- phy_node = of_parse_phandle(dn, "phy", 0);
- if (!phy_node) {
- if (!of_phy_is_fixed_link(dn)) {
- dev_err(&pdev->dev, "no PHY specified\n");
- err = -ENODEV;
- goto err_free_irq;
- }
-
- err = of_phy_register_fixed_link(dn);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot register fixed PHY\n");
- goto err_free_irq;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phy_node = of_node_get(dn);
- }
-
phy_mode = of_get_phy_mode(dn);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
err = -EINVAL;
- goto err_put_phy_node;
+ goto err_free_irq;
+ }
+
+ phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
+ &mvneta_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_free_irq;
}
dev->tx_queue_len = MVNETA_MAX_TXD;
@@ -4194,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
spin_lock_init(&pp->lock);
- pp->phy_node = phy_node;
+ pp->phylink = phylink;
pp->phy_interface = phy_mode;
-
- err = of_property_read_string(dn, "managed", &managed);
- pp->use_inband_status = (err == 0 &&
- strcmp(managed, "in-band-status") == 0);
+ pp->dn = dn;
pp->rxq_def = rxq_def;
@@ -4221,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk);
- goto err_put_phy_node;
+ goto err_free_phylink;
}
clk_prepare_enable(pp->clk);
@@ -4358,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pp->dev);
- if (pp->use_inband_status) {
- struct phy_device *phy = of_phy_find_device(dn);
-
- mvneta_fixed_link_update(pp, phy);
-
- put_device(&phy->mdio.dev);
- }
-
return 0;
err_netdev:
@@ -4382,10 +4506,9 @@ err_free_ports:
err_clk:
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
-err_put_phy_node:
- of_node_put(phy_node);
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
+err_free_phylink:
+ if (pp->phylink)
+ phylink_destroy(pp->phylink);
err_free_irq:
irq_dispose_mapping(dev->irq);
err_free_netdev:
@@ -4397,7 +4520,6 @@ err_free_netdev:
static int mvneta_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct device_node *dn = pdev->dev.of_node;
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
@@ -4405,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev)
clk_disable_unprepare(pp->clk);
free_percpu(pp->ports);
free_percpu(pp->stats);
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
irq_dispose_mapping(dev->irq);
- of_node_put(pp->phy_node);
+ phylink_destroy(pp->phylink);
free_netdev(dev);
if (pp->bm_priv) {
@@ -4426,8 +4546,10 @@ static int mvneta_suspend(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct mvneta_port *pp = netdev_priv(dev);
+ rtnl_lock();
if (netif_running(dev))
mvneta_stop(dev);
+ rtnl_unlock();
netif_device_detach(dev);
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
@@ -4460,14 +4582,13 @@ static int mvneta_resume(struct device *device)
return err;
}
- if (pp->use_inband_status)
- mvneta_fixed_link_update(pp, dev->phydev);
-
netif_device_attach(dev);
+ rtnl_lock();
if (netif_running(dev)) {
mvneta_open(dev);
mvneta_set_rx_mode(dev);
}
+ rtnl_unlock();
return 0;
}
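The suspend/resume hunks at the end of the mvneta diff wrap the stop/open calls in rtnl_lock(). This follows from the conversion: phylink_start() and phylink_stop() assert that RTNL is held, and unlike ndo_open/ndo_stop the PM callbacks are not entered with the lock. A sketch of the resulting pattern:

	static int sketch_suspend(struct device *device)
	{
		struct net_device *dev = dev_get_drvdata(device);

		rtnl_lock();		/* phylink entry points ASSERT_RTNL() */
		if (netif_running(dev))
			mvneta_stop(dev);
		rtnl_unlock();

		netif_device_detach(dev);
		return 0;
	}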
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 634b2f41cc9e..a1d7b88cf083 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -10,6 +10,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -454,11 +455,11 @@
/* Various constants */
/* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
-#define MVPP2_RX_COAL_USEC 100
+#define MVPP2_RX_COAL_USEC 64
/* The two bytes Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
@@ -504,10 +505,12 @@
#define MVPP2_DEFAULT_RXQ 4
/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD 128
+#define MVPP2_MAX_RXD_MAX 1024
+#define MVPP2_MAX_RXD_DFLT 128
/* Max number of Tx descriptors */
-#define MVPP2_MAX_TXD 1024
+#define MVPP2_MAX_TXD_MAX 2048
+#define MVPP2_MAX_TXD_DFLT 1024
/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64
@@ -863,7 +866,7 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
- struct mvpp2_port **port_list;
+ struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
/* Aggregated TXQs */
struct mvpp2_tx_queue *aggr_txqs;
@@ -930,6 +933,9 @@ struct mvpp2_port {
struct mvpp2 *priv;
+ /* Firmware node associated to the port */
+ struct fwnode_handle *fwnode;
+
/* Per-port registers' base address */
void __iomem *base;
void __iomem *stats_base;
@@ -5802,6 +5808,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->reserved_num = 0;
txq_pcpu->txq_put_index = 0;
txq_pcpu->txq_get_index = 0;
+ txq_pcpu->tso_headers = NULL;
txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
@@ -5829,10 +5836,13 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->buffs);
- dma_free_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- txq_pcpu->tso_headers,
- txq_pcpu->tso_headers_dma);
+ if (txq_pcpu->tso_headers)
+ dma_free_coherent(port->dev->dev.parent,
+ txq_pcpu->size * TSO_HEADER_SIZE,
+ txq_pcpu->tso_headers,
+ txq_pcpu->tso_headers_dma);
+
+ txq_pcpu->tso_headers = NULL;
}
if (txq->descs)
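The two txq hunks above make the TSO header buffer lifetime explicit: tso_headers starts out NULL, the teardown path only calls dma_free_coherent() when it is set, and it is re-NULLed afterwards, so a queue that failed half-way through init can be torn down without a bogus free. The idiom, condensed:

	txq_pcpu->tso_headers = NULL;			/* init */
	...
	if (txq_pcpu->tso_headers)			/* teardown */
		dma_free_coherent(port->dev->dev.parent,
				  txq_pcpu->size * TSO_HEADER_SIZE,
				  txq_pcpu->tso_headers,
				  txq_pcpu->tso_headers_dma);
	txq_pcpu->tso_headers = NULL;			/* idempotent */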
@@ -6832,13 +6842,13 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
if (ring->rx_pending == 0 || ring->tx_pending == 0)
return -EINVAL;
- if (ring->rx_pending > MVPP2_MAX_RXD)
- new_rx_pending = MVPP2_MAX_RXD;
+ if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
+ new_rx_pending = MVPP2_MAX_RXD_MAX;
else if (!IS_ALIGNED(ring->rx_pending, 16))
new_rx_pending = ALIGN(ring->rx_pending, 16);
- if (ring->tx_pending > MVPP2_MAX_TXD)
- new_tx_pending = MVPP2_MAX_TXD;
+ if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
+ new_tx_pending = MVPP2_MAX_TXD_MAX;
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
@@ -7318,9 +7328,10 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
- c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
- c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
- c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
+ c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+ c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->tx_coalesce_usecs = port->tx_time_coal;
return 0;
}
@@ -7340,8 +7351,8 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
- ring->rx_max_pending = MVPP2_MAX_RXD;
- ring->tx_max_pending = MVPP2_MAX_TXD;
+ ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
+ ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
ring->rx_pending = port->rx_ring_size;
ring->tx_pending = port->tx_ring_size;
}
@@ -7492,7 +7503,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
strncpy(irqname, "rx-shared", sizeof(irqname));
}
- v->irq = of_irq_get_byname(port_node, irqname);
+ if (port_node)
+ v->irq = of_irq_get_byname(port_node, irqname);
+ else
+ v->irq = fwnode_irq_get(port->fwnode, i);
if (v->irq <= 0) {
ret = -EINVAL;
goto err;
@@ -7704,17 +7718,16 @@ static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
}
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
- struct device_node *port_node,
+ struct fwnode_handle *fwnode,
char **mac_from)
{
struct mvpp2_port *port = netdev_priv(dev);
char hw_mac_addr[ETH_ALEN] = {0};
- const char *dt_mac_addr;
+ char fw_mac_addr[ETH_ALEN];
- dt_mac_addr = of_get_mac_address(port_node);
- if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
- *mac_from = "device tree";
- ether_addr_copy(dev->dev_addr, dt_mac_addr);
+ if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ *mac_from = "firmware node";
+ ether_addr_copy(dev->dev_addr, fw_mac_addr);
return;
}
@@ -7733,13 +7746,14 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
- struct device_node *port_node,
- struct mvpp2 *priv, int index)
+ struct fwnode_handle *port_fwnode,
+ struct mvpp2 *priv)
{
struct device_node *phy_node;
- struct phy *comphy;
+ struct phy *comphy = NULL;
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
+ struct device_node *port_node = to_of_node(port_fwnode);
struct net_device *dev;
struct resource *res;
char *mac_from = "";
@@ -7750,7 +7764,12 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int phy_mode;
int err, i, cpu;
- has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+ if (port_node) {
+ has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+ } else {
+ has_tx_irqs = true;
+ queue_mode = MVPP2_QDIST_MULTI_MODE;
+ }
if (!has_tx_irqs)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
@@ -7765,36 +7784,43 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!dev)
return -ENOMEM;
- phy_node = of_parse_phandle(port_node, "phy", 0);
- phy_mode = of_get_phy_mode(port_node);
+ if (port_node)
+ phy_node = of_parse_phandle(port_node, "phy", 0);
+ else
+ phy_node = NULL;
+
+ phy_mode = fwnode_get_phy_mode(port_fwnode);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy mode\n");
err = phy_mode;
goto err_free_netdev;
}
- comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
- if (IS_ERR(comphy)) {
- if (PTR_ERR(comphy) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
- goto err_free_netdev;
+ if (port_node) {
+ comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
+ if (IS_ERR(comphy)) {
+ if (PTR_ERR(comphy) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+ comphy = NULL;
}
- comphy = NULL;
}
- if (of_property_read_u32(port_node, "port-id", &id)) {
+ if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
err = -EINVAL;
dev_err(&pdev->dev, "missing port-id value\n");
goto err_free_netdev;
}
- dev->tx_queue_len = MVPP2_MAX_TXD;
+ dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &mvpp2_netdev_ops;
dev->ethtool_ops = &mvpp2_eth_tool_ops;
port = netdev_priv(dev);
port->dev = dev;
+ port->fwnode = port_fwnode;
port->ntxqs = ntxqs;
port->nrxqs = nrxqs;
port->priv = priv;
@@ -7804,7 +7830,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (err)
goto err_free_netdev;
- port->link_irq = of_irq_get_byname(port_node, "link");
+ if (port_node)
+ port->link_irq = of_irq_get_byname(port_node, "link");
+ else
+ port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
if (port->link_irq == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_deinit_qvecs;
@@ -7813,7 +7842,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* the link irq is optional */
port->link_irq = 0;
- if (of_property_read_bool(port_node, "marvell,loopback"))
+ if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
port->flags |= MVPP2_F_LOOPBACK;
port->id = id;
@@ -7838,8 +7867,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
MVPP21_MIB_COUNTERS_OFFSET +
port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
} else {
- if (of_property_read_u32(port_node, "gop-port-id",
- &port->gop_id)) {
+ if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
+ &port->gop_id)) {
err = -EINVAL;
dev_err(&pdev->dev, "missing gop-port-id value\n");
goto err_deinit_qvecs;
@@ -7869,10 +7898,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
- mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);
+ mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
- port->tx_ring_size = MVPP2_MAX_TXD;
- port->rx_ring_size = MVPP2_MAX_RXD;
+ port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
+ port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
SET_NETDEV_DEV(dev, &pdev->dev);
err = mvpp2_port_init(port);
@@ -7927,7 +7956,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- priv->port_list[index] = port;
+ priv->port_list[priv->port_count++] = port;
+
return 0;
err_free_port_pcpu:
@@ -8186,8 +8216,9 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
static int mvpp2_probe(struct platform_device *pdev)
{
- struct device_node *dn = pdev->dev.of_node;
- struct device_node *port_node;
+ const struct acpi_device_id *acpi_id;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
@@ -8198,8 +8229,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->hw_version =
- (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (has_acpi_companion(&pdev->dev)) {
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ priv->hw_version = (unsigned long)acpi_id->driver_data;
+ } else {
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -8213,10 +8250,23 @@ static int mvpp2_probe(struct platform_device *pdev)
return PTR_ERR(priv->lms_base);
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (has_acpi_companion(&pdev->dev)) {
+ /* In case the MDIO memory region is declared in
+ * the ACPI, it can already appear as 'in-use'
+ * in the OS. Because it is overlapped by the second
+ * region of the network controller, make sure it is
+ * released before requesting it again. The mvpp2
+ * driver takes care to avoid concurrent access to
+ * this memory region.
+ */
+ release_resource(res);
+ }
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+ }
+ if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -8242,32 +8292,34 @@ static int mvpp2_probe(struct platform_device *pdev)
else
priv->max_port_rxqs = 32;
- priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
- if (IS_ERR(priv->pp_clk))
- return PTR_ERR(priv->pp_clk);
- err = clk_prepare_enable(priv->pp_clk);
- if (err < 0)
- return err;
-
- priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
- if (IS_ERR(priv->gop_clk)) {
- err = PTR_ERR(priv->gop_clk);
- goto err_pp_clk;
- }
- err = clk_prepare_enable(priv->gop_clk);
- if (err < 0)
- goto err_pp_clk;
+ if (dev_of_node(&pdev->dev)) {
+ priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+ if (IS_ERR(priv->pp_clk))
+ return PTR_ERR(priv->pp_clk);
+ err = clk_prepare_enable(priv->pp_clk);
+ if (err < 0)
+ return err;
- if (priv->hw_version == MVPP22) {
- priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
- if (IS_ERR(priv->mg_clk)) {
- err = PTR_ERR(priv->mg_clk);
- goto err_gop_clk;
+ priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+ if (IS_ERR(priv->gop_clk)) {
+ err = PTR_ERR(priv->gop_clk);
+ goto err_pp_clk;
}
-
- err = clk_prepare_enable(priv->mg_clk);
+ err = clk_prepare_enable(priv->gop_clk);
if (err < 0)
- goto err_gop_clk;
+ goto err_pp_clk;
+
+ if (priv->hw_version == MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
if (IS_ERR(priv->axi_clk)) {
@@ -8280,10 +8332,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_gop_clk;
}
- }
- /* Get system's tclk rate */
- priv->tclk = clk_get_rate(priv->pp_clk);
+ /* Get system's tclk rate */
+ priv->tclk = clk_get_rate(priv->pp_clk);
+ } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->tclk)) {
+ dev_err(&pdev->dev, "missing clock-frequency value\n");
+ return -EINVAL;
+ }
if (priv->hw_version == MVPP22) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
@@ -8306,30 +8362,19 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_mg_clk;
}
- priv->port_count = of_get_available_child_count(dn);
+ /* Initialize ports */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ err = mvpp2_port_probe(pdev, port_fwnode, priv);
+ if (err < 0)
+ goto err_port_probe;
+ }
+
if (priv->port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
goto err_mg_clk;
}
- priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count,
- sizeof(*priv->port_list),
- GFP_KERNEL);
- if (!priv->port_list) {
- err = -ENOMEM;
- goto err_mg_clk;
- }
-
- /* Initialize ports */
- i = 0;
- for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv, i);
- if (err < 0)
- goto err_port_probe;
- i++;
- }
-
/* Statistics must be gathered regularly because some of them (like
* packet counters) are 32-bit registers and could overflow quite
* quickly. For instance, a 10Gb link used at full bandwidth with the
@@ -8350,7 +8395,7 @@ static int mvpp2_probe(struct platform_device *pdev)
err_port_probe:
i = 0;
- for_each_available_child_of_node(dn, port_node) {
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i])
mvpp2_port_remove(priv->port_list[i]);
i++;
@@ -8369,14 +8414,14 @@ err_pp_clk:
static int mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct device_node *dn = pdev->dev.of_node;
- struct device_node *port_node;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
int i = 0;
flush_workqueue(priv->stats_queue);
destroy_workqueue(priv->stats_queue);
- for_each_available_child_of_node(dn, port_node) {
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
mvpp2_port_remove(priv->port_list[i]);
@@ -8399,6 +8444,9 @@ static int mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
+ if (is_acpi_node(port_fwnode))
+ return 0;
+
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
@@ -8420,12 +8468,19 @@ static const struct of_device_id mvpp2_match[] = {
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
+static const struct acpi_device_id mvpp2_acpi_match[] = {
+ { "MRVL0110", MVPP22 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
.remove = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
+ .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
},
};
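Taken together, the mvpp2 hunks teach a single probe path to serve both device tree and ACPI by switching from of_* helpers to their firmware-node equivalents (fwnode_get_phy_mode(), fwnode_property_read_u32(), fwnode_get_mac_address(), fwnode_irq_get()). The port walk, condensed from mvpp2_probe() above:

	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;

	/* Resolves to OF or ACPI child enumeration at runtime */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

One asymmetry remains: there is no named-IRQ lookup for ACPI, so where the DT path keeps of_irq_get_byname(), the ACPI path falls back to fwnode_irq_get() with a fixed index ordering.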
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9efe1771423c..9fe85300e7b6 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
return -ETIMEDOUT;
}
- mdelay(1);
+ msleep(1);
}
return 0;
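The lone sky2 change swaps mdelay(1) for msleep(1) in the VPD polling loop: mdelay() busy-spins the CPU for the whole millisecond, while msleep() lets the scheduler run something else, which is safe here because the VPD wait executes in process context. A generic sketch of the sleeping-poll idiom (hw_ready() and the 100 ms budget are illustrative, not from sky2):

	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (!hw_ready(hw)) {			/* hypothetical check */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(1);			/* sleeps; mdelay() would spin */
	}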
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index fc67e35b253e..29826dd15204 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1952,14 +1952,16 @@ static int mtk_hw_init(struct mtk_eth *eth)
}
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
- /* Set GE2 driving and slew rate */
- regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+ if (eth->pctl) {
+ /* Set GE2 driving and slew rate */
+ regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
- /* set GE2 TDSEL */
- regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+ /* set GE2 TDSEL */
+ regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
- /* set GE2 TUNE */
- regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+ /* set GE2 TUNE */
+ regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+ }
/* Set linkdown as the default for each GMAC. Its own MCR would be set
* up with the more appropriate value when mtk_phy_link_adjust call is
@@ -2538,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,pctl");
- if (IS_ERR(eth->pctl)) {
- dev_err(&pdev->dev, "no pctl regmap found\n");
- return PTR_ERR(eth->pctl);
+ if (eth->soc->required_pctl) {
+ eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,pctl");
+ if (IS_ERR(eth->pctl)) {
+ dev_err(&pdev->dev, "no pctl regmap found\n");
+ return PTR_ERR(eth->pctl);
+ }
}
for (i = 0; i < 3; i++) {
@@ -2668,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev)
static const struct mtk_soc_data mt2701_data = {
.caps = MTK_GMAC1_TRGMII,
- .required_clks = MT7623_CLKS_BITMAP
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
};
static const struct mtk_soc_data mt7622_data = {
.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
- .required_clks = MT7622_CLKS_BITMAP
+ .required_clks = MT7622_CLKS_BITMAP,
+ .required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
.caps = MTK_GMAC1_TRGMII,
- .required_clks = MT7623_CLKS_BITMAP
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
};
const struct of_device_id of_mtk_match[] = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index a3af4660de81..672b8c353c47 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -573,10 +573,13 @@ struct mtk_rx_ring {
* @caps Flags shown the extra capability for the SoC
* @required_clks Flags shown the bitmap for required clocks on
* the target SoC
+ * @required_pctl A bool value to show whether the SoC requires
+ * the extra setup for those pins used by GMAC.
*/
struct mtk_soc_data {
u32 caps;
u32 required_clks;
+ bool required_pctl;
};
/* currently no SoC has more than 2 macs */
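The pair of mediatek hunks makes the pctl syscon optional per SoC: struct mtk_soc_data grows a required_pctl flag, probe only looks up the regmap when the flag is set, and mtk_hw_init() guards the GE2 pin writes with if (eth->pctl), since MT7622 has no such block. The per-SoC capability struct, copied from the diff:

	static const struct mtk_soc_data mt7622_data = {
		.caps		= MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
		.required_clks	= MT7622_CLKS_BITMAP,
		.required_pctl	= false,	/* no pctl block on MT7622 */
	};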
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc92aa68..1a0c3bf86ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
}
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
case IEEE_8021QAZ_TSA_STRICT:
break;
case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
/* higher TC means higher priority => lower pg */
for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
+ pg[i] = MLX4_EN_TC_VENDOR;
+ tc_tx_bw[i] = MLX4_EN_BW_MAX;
+ break;
case IEEE_8021QAZ_TSA_STRICT:
pg[i] = num_strict++;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f04164885..ebc1f566a4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
+ if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+ en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ MLX4_EN_MIN_RX_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+ en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+ __func__, param->tx_pending,
+ MLX4_EN_MIN_TX_SIZE);
+ return -EINVAL;
+ }
+
rx_size = roundup_pow_of_two(param->rx_pending);
- rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
- rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
tx_size = roundup_pow_of_two(param->tx_pending);
- tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
- tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
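The en_ethtool.c hunk changes set_ringparam from silently clamping out-of-range requests to rejecting undersized ones with -EINVAL and a warning, while keeping the power-of-two round-up. A worked example of the rounding that remains (roundup_pow_of_two() comes from <linux/log2.h>):

	/* A request for 1000 RX descriptors still becomes a 1024-entry
	 * ring, but a request below MLX4_EN_MIN_RX_SIZE now fails
	 * instead of being bumped up behind the user's back.
	 */
	rx_size = roundup_pow_of_two(1000);	/* == 1024 */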
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 99051a294fa6..8fc51bc29003 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size, priv->stride,
- node))
+ node, i))
goto err;
+
}
#ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ u8 prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+ priv->ets.prio_tc[prio] = prio;
+ priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 85e28efcda33..b4d144e67514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node)
+ u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+ goto err_ring;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->rx_info = vzalloc(tmp);
if (!ring->rx_info) {
err = -ENOMEM;
- goto err_ring;
+ goto err_xdp_info;
}
}
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
+err_xdp_info:
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
kfree(ring);
*pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
lockdep_is_held(&mdev->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
return 0;
}
#endif
+
+/* We reach this function only after checking that at least one
+ * of the (IPv4 | IPv6) bits is set in cqe->status.
+ */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hdr += sizeof(struct vlan_hdr);
}
- if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
- return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
- return 0;
+ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int cq_ring = cq->ring;
bool doorbell_pending;
struct mlx4_cqe *cqe;
+ struct xdp_buff xdp;
int polled = 0;
int index;
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
+ xdp.rxq = &ring->xdp_rxq;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* read bytes but not past the end of the frag.
*/
if (xdp_prog) {
- struct xdp_buff xdp;
dma_addr_t dma;
void *orig_data;
u32 act;
@@ -814,33 +823,33 @@ xdp_drop_no_cnt:
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
- if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
- cqe->checksum == cpu_to_be16(0xffff)) {
- bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
- (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
-
- ip_summed = CHECKSUM_UNNECESSARY;
- hash_type = PKT_HASH_TYPE_L4;
- if (l2_tunnel)
- skb->csum_level = 1;
- ring->csum_ok++;
- } else {
+ bool l2_tunnel;
+
+ if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ cqe->checksum == cpu_to_be16(0xffff)))
goto csum_none;
- }
+
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ ip_summed = CHECKSUM_UNNECESSARY;
+ hash_type = PKT_HASH_TYPE_L4;
+ if (l2_tunnel)
+ skb->csum_level = 1;
+ ring->csum_ok++;
} else {
- if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
- (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPV6))) {
- if (check_csum(cqe, skb, va, dev->features)) {
- goto csum_none;
- } else {
- ip_summed = CHECKSUM_COMPLETE;
- hash_type = PKT_HASH_TYPE_L3;
- ring->csum_complete++;
- }
- } else {
+ if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+ MLX4_CQE_STATUS_IPV6))))
+#else
+ 0))))
+#endif
goto csum_none;
- }
+ if (check_csum(cqe, skb, va, dev->features))
+ goto csum_none;
+ ip_summed = CHECKSUM_COMPLETE;
+ hash_type = PKT_HASH_TYPE_L3;
+ ring->csum_complete++;
}
} else {
csum_none:
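One stylistic wart in the hunk above: the #if IS_ENABLED(CONFIG_IPV6) / #else 0)))) pair splices preprocessor lines into the middle of a single if condition, so the parenthesis count must match across both branches. A hedged alternative sketch that keeps the condition on one compile path by building the status mask first (the helper name is hypothetical, not part of the patch):

/* Hypothetical equivalent of the guard above: build the status mask once
 * and let IS_ENABLED() fold the IPv6 bit away at compile time when
 * CONFIG_IPV6 is off, so no #if appears inside the condition.
 */
static inline bool rx_csum_non_tcp_udp_ok(u32 priv_flags, __be16 status)
{
	u16 mask = MLX4_CQE_STATUS_IPV4;

	if (IS_ENABLED(CONFIG_IPV6))
		mask |= MLX4_CQE_STATUS_IPV6;

	return (priv_flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP) &&
	       (status & cpu_to_be16(mask));
}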
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2b72677eccd4..f470ae37d937 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
+ struct xdp_rxq_info xdp_rxq;
};
struct mlx4_en_cq {
@@ -479,6 +481,7 @@ struct mlx4_en_frag_info {
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+#define MLX4_EN_TC_VENDOR 0
#define MLX4_EN_TC_ETS 7
enum dcb_pfc_type {
@@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node);
+ u32 size, u16 stride, int node, int queue_index);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
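The prototype change threads the RX queue index down to xdp_rxq_info_reg(). The matching call site lives in en_netdev.c, outside this excerpt; presumably it simply passes the ring's loop index, along these lines:

/* Hedged sketch of the updated caller (en_netdev.c is not part of this
 * excerpt); the ring index i doubles as the XDP queue_index.
 */
err = mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
			     prof->rx_ring_size, priv->stride,
			     node, i);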
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c7c0764991c9..2e84f10f59ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
- struct mlx4_cmd_mailbox *mailbox;
- int err;
-
if (!fmr->maps)
return;
- fmr->maps = 0;
+ /* To unmap: it is sufficient to take back ownership from HW */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
- return;
- }
+ /* Make sure MPT status is visible */
+ wmb();
- err = mlx4_HW2SW_MPT(dev, NULL,
- key_to_hw_index(fmr->mr.key) &
- (dev->caps.num_mpts - 1));
- mlx4_free_cmd_mailbox(dev, mailbox);
- if (err) {
- pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
- return;
- }
- fmr->mr.enabled = MLX4_MPT_EN_SW;
+ fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
+ if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+ /* In case the FMR was enabled and then unmapped,
+ * make sure to give ownership of the MPT back to HW
+ * so the HW2SW_MPT command will succeed.
+ */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+ /* Make sure MPT status is visible before changing MPT fields */
+ wmb();
+ fmr->mpt->length = 0;
+ fmr->mpt->start = 0;
+ /* Make sure MPT data is visible before changing MPT status */
+ wmb();
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+ /* Make sure MPT status is visible */
+ wmb();
+ }
ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
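Both mr.c hunks lean on the same CPU/device handshake: the first byte of the MPT entry acts as an ownership flag, and each wmb() orders one stage of the update against the status write that publishes it, so the HCA never observes a half-written entry. The pattern, distilled:

/* Ownership handshake distilled from the hunks above: reclaim the entry,
 * mutate it, publish it back, with a write barrier between every stage.
 */
static void mpt_reclaim_update_publish(struct mlx4_mpt_entry *mpt)
{
	*(u8 *)mpt = MLX4_MPT_STATUS_SW;	/* 1. take ownership from HW */
	wmb();		/* status visible before the field edits */

	mpt->length = 0;			/* 2. mutate fields */
	mpt->start = 0;
	wmb();		/* field edits visible before the status flip */

	*(u8 *)mpt = MLX4_MPT_STATUS_HW;	/* 3. hand ownership back */
	wmb();		/* make the final status visible */
}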
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b40ab07..c805769d92a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c2d89bfa1a70..4c9360b25532 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,8 +44,11 @@
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -226,12 +229,6 @@ enum mlx5e_priv_flag {
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
-struct mlx5e_cq_moder {
- u16 usec;
- u16 pkts;
- u8 cq_period_mode;
-};
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -242,8 +239,8 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct mlx5e_cq_moder rx_cq_moderation;
- struct mlx5e_cq_moder tx_cq_moderation;
+ struct net_dim_cq_moder rx_cq_moderation;
+ struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -253,7 +250,7 @@ struct mlx5e_params {
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
- bool rx_am_enabled;
+ bool rx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -472,32 +469,6 @@ struct mlx5e_mpw_info {
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};
-struct mlx5e_rx_am_stats {
- int ppms; /* packets per msec */
- int bpms; /* bytes per msec */
- int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
- ktime_t time;
- u32 pkt_ctr;
- u32 byte_ctr;
- u16 event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
- u8 state;
- struct mlx5e_rx_am_stats prev_stats;
- struct mlx5e_rx_am_sample start_sample;
- struct work_struct work;
- u8 profile_ix;
- u8 mode;
- u8 tune_state;
- u8 steps_right;
- u8 steps_left;
- u8 tired;
-};
-
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
@@ -558,7 +529,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
- struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct net_dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
@@ -571,6 +542,9 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
+
+ /* XDP read-mostly */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct mlx5e_channel {
@@ -587,6 +561,7 @@ struct mlx5e_channel {
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
+ struct mlx5e_ch_stats stats;
/* control */
struct mlx5e_priv *priv;
@@ -655,6 +630,7 @@ struct mlx5e_tc_table {
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -722,6 +698,11 @@ enum {
MLX5E_ARFS_FT_LEVEL
};
+enum {
+ MLX5E_TC_FT_LEVEL = 0,
+ MLX5E_TC_TTC_FT_LEVEL,
+};
+
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
@@ -860,11 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
+void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -1054,11 +1031,26 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+struct ttc_params {
+ struct mlx5_flow_table_attr ft_attr;
+ u32 any_tt_tirn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
@@ -1071,6 +1063,8 @@ int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+int mlx5e_bits_invert(unsigned long a, int size);
+
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
@@ -1110,4 +1104,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
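The deleted mlx5e_cq_moder is replaced field-for-field by the generic moderation profile type from <linux/net_dim.h>; assuming the header as introduced by this series, the replacement carries the same three fields:

/* Assumed shape of the generic type (include/linux/net_dim.h in this
 * series) that replaces the driver-private mlx5e_cq_moder above.
 */
struct net_dim_cq_moder {
	u16 usec;		/* coalescing time budget */
	u16 pkts;		/* coalescing packet budget */
	u8 cq_period_mode;	/* EQE- vs CQE-based period start */
};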
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 000000000000..602851ab5b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ dim->state = NET_DIM_START_MEASURE;
+}
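en_dim.c holds only the deferred apply step: when the net_dim state machine picks a new profile it schedules dim->work, and this handler pushes the chosen usec/pkts pair into the CQ. The producer side runs in napi poll (en_txrx.c, outside this excerpt); assuming the net_dim API of this series, it looks roughly like:

/* Hedged sketch of the napi-side producer for the work item above, based
 * on the net_dim API this series introduces (the real call site is in
 * en_txrx.c, which is not part of this excerpt).
 */
static void mlx5e_rq_dim_sample(struct mlx5e_rq *rq)
{
	struct net_dim_sample dim_sample;

	net_dim_sample(rq->cq.event_ctr, rq->stats.packets,
		       rq->stats.bytes, &dim_sample);
	/* May bump dim->profile_ix and schedule mlx5e_rx_dim_work() */
	net_dim(&rq->dim, dim_sample);
}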
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ea5fff2c3143..cc8048f68f11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,7 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
return;
mutex_lock(&priv->state_lock);
- mlx5e_update_stats(priv, true);
+ mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -295,7 +295,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
u32 min_rq_size;
- u32 max_rq_size;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
@@ -314,8 +313,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_min_log_rq_size(rq_wq_type));
- max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
param->rx_pending);
@@ -325,12 +322,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > max_rq_size) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
- __func__, param->rx_pending,
- max_rq_size);
- return -EINVAL;
- }
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
@@ -346,12 +337,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
- if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
- return -EINVAL;
- }
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
@@ -479,7 +464,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
return 0;
}
@@ -533,7 +518,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -541,7 +526,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index def513484845..f64dda2bed31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -806,25 +806,25 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
int tt;
int err;
- ttc = &priv->fs.ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -832,12 +832,12 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
goto del_rules;
}
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
rules = ttc->tunnel_rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.inner_ttc.ft.t;
+ dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_tunnel_rules[tt].etype,
@@ -977,25 +977,25 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
int err;
int tt;
- ttc = &priv->fs.inner_ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->inner_indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
@@ -1075,21 +1075,42 @@ err:
return err;
}
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params)
+{
+ ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
+ ttc_params->inner_ttc = &priv->fs.inner_ttc;
+}
+
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
- ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1100,7 +1121,7 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_inner_ttc_table_rules(priv);
+ err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1111,10 +1132,9 @@ err:
return err;
}
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return;
@@ -1122,27 +1142,21 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
mlx5e_destroy_flow_table(&ttc->ft);
}
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-
mlx5e_cleanup_ttc_rules(ttc);
mlx5e_destroy_flow_table(&ttc->ft);
}
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
- ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1153,7 +1167,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_ttc_table_rules(priv);
+ err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1474,7 +1488,8 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -1489,14 +1504,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -1524,9 +1548,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -1537,8 +1561,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d8aefeed124d..47bab842c5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -173,182 +173,23 @@ unlock:
rtnl_unlock();
}
-static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats temp, *s = &temp;
- struct mlx5e_rq_stats *rq_stats;
- struct mlx5e_sq_stats *sq_stats;
- int i, j;
-
- memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_channel *c = priv->channels.c[i];
-
- rq_stats = &c->rq.stats;
-
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
- s->rx_lro_packets += rq_stats->lro_packets;
- s->rx_lro_bytes += rq_stats->lro_bytes;
- s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
- s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_complete += rq_stats->csum_complete;
- s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
- s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
- s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
- s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
- s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
- s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_page_reuse += rq_stats->page_reuse;
- s->rx_cache_reuse += rq_stats->cache_reuse;
- s->rx_cache_full += rq_stats->cache_full;
- s->rx_cache_empty += rq_stats->cache_empty;
- s->rx_cache_busy += rq_stats->cache_busy;
- s->rx_cache_waive += rq_stats->cache_waive;
-
- for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = &c->sq[j].stats;
-
- s->tx_packets += sq_stats->packets;
- s->tx_bytes += sq_stats->bytes;
- s->tx_tso_packets += sq_stats->tso_packets;
- s->tx_tso_bytes += sq_stats->tso_bytes;
- s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
- s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
- s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
- s->tx_queue_stopped += sq_stats->stopped;
- s->tx_queue_wake += sq_stats->wake;
- s->tx_queue_dropped += sq_stats->dropped;
- s->tx_xmit_more += sq_stats->xmit_more;
- s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
- s->tx_csum_none += sq_stats->csum_none;
- s->tx_csum_partial += sq_stats->csum_partial;
- }
- }
-
- s->link_down_events_phy = MLX5_GET(ppcnt_reg,
- priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
- memcpy(&priv->stats.sw, s, sizeof(*s));
-}
-
-static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
- struct mlx5_core_dev *mdev = priv->mdev;
-
- MLX5_SET(query_vport_counter_in, in, opcode,
- MLX5_CMD_OP_QUERY_VPORT_COUNTER);
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
-
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
-{
- struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int prio;
- void *out;
-
- MLX5_SET(ppcnt_reg, in, local_port, 1);
-
- out = pstats->IEEE_802_3_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (!full)
- return;
-
- out = pstats->RFC_2863_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->RFC_2819_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->phy_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
- out = pstats->eth_ext_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- out = pstats->per_prio_counters[prio];
- MLX5_SET(ppcnt_reg, in, prio_tc, prio);
- mlx5_core_access_reg(mdev, in, sz, out, sz,
- MLX5_REG_PPCNT, 0, 0);
- }
-}
-
-static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
-{
- struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
- int err;
-
- if (!priv->q_counter)
- return;
-
- err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
- if (err)
- return;
-
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
-}
-
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
- void *out;
-
- if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
- return;
-
- out = pcie_stats->pcie_perf_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-}
+ int i;
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
-{
- if (full) {
- mlx5e_update_pcie_counters(priv);
- mlx5e_ipsec_update_stats(priv);
- }
- mlx5e_update_pport_counters(priv, full);
- mlx5e_update_vport_counters(priv);
- mlx5e_update_q_counter(priv);
- mlx5e_update_sw_counters(priv);
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats)
+ mlx5e_stats_grps[i].update_stats(priv);
}
static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
- mlx5e_update_stats(priv, false);
+ int i;
+
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats_mask &
+ MLX5E_NDO_UPDATE_STATS)
+ mlx5e_stats_grps[i].update_stats(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
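Both new loops walk a single table of stats-group descriptors instead of hard-coding each counter family: a full update calls every group's callback, while the ndo path filters on an opt-in mask. Assuming the descriptor shape from en_stats.h (not part of this excerpt), the table looks roughly like:

/* Assumed descriptor shape (defined in en_stats.h, outside this excerpt)
 * that drives both update loops above.
 */
struct mlx5e_stats_grp {
	u16 update_stats_mask;	/* e.g. MLX5E_NDO_UPDATE_STATS opt-in */
	int (*get_num_stats)(struct mlx5e_priv *priv);
	/* ... string/value fill callbacks elided ... */
	void (*update_stats)(struct mlx5e_priv *priv);
};

extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;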
@@ -582,6 +423,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ if (err < 0)
+ goto err_rq_wq_destroy;
+
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = params->rq_headroom;
@@ -674,8 +519,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wqe->data.lkey = rq->mkey_be;
}
- INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+ INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
+ switch (params->rx_cq_moderation.cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -687,6 +541,7 @@ err_destroy_umr_mkey:
err_rq_wq_destroy:
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -699,6 +554,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
@@ -919,7 +776,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- if (params->rx_am_enabled)
+ if (params->rx_dim_enabled)
c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
return 0;
@@ -952,7 +809,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->am.work);
+ cancel_work_sync(&rq->dim.work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1565,7 +1422,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_moder moder,
+ struct net_dim_cq_moder moder,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -1747,7 +1604,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icocq_moder = {0, 0};
+ struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
@@ -1999,7 +1856,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2203,7 +2060,7 @@ static int mlx5e_rx_hash_fn(int hfunc)
MLX5_RX_HASH_FN_INVERTED_XOR8;
}
-static int mlx5e_bits_invert(unsigned long a, int size)
+int mlx5e_bits_invert(unsigned long a, int size)
{
int inv = 0;
int i;
@@ -2765,6 +2622,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
if (err)
return err;
+ /* Mark as unused since "Drop-RQ" packets never reach XDP */
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
rq->mdev = mdev;
return 0;
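The drop-RQ never feeds packets to an XDP program, so rather than leaving its freshly registered xdp_rxq_info live it is flipped to the explicit "unused" state, which satisfies the sanity checks at unregister time. Assuming the state helpers in <net/xdp.h>, the object moves through:

/* xdp_rxq_info state transitions as used here (helpers from <net/xdp.h>);
 * the drop-RQ takes the REGISTERED -> UNUSED shortcut.
 */
xdp_rxq_info_reg(&rq->xdp_rxq, netdev, rq->ix); /* NEW -> REGISTERED */
xdp_rxq_info_unused(&rq->xdp_rxq);              /* REGISTERED -> UNUSED */
xdp_rxq_info_unreg(&rq->xdp_rxq);               /* -> UNREGISTERED */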
@@ -3084,9 +2944,6 @@ out:
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -3104,7 +2961,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -3738,26 +3595,62 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
+static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int irqn_not_used, eqn;
+ struct mlx5_eq *eq;
+ u32 eqe_count;
+
+ if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
+ return false;
+
+ eq = mlx5_eqn2eq(mdev, eqn);
+ if (IS_ERR(eq))
+ return false;
+
+ netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eqn, eq->cons_index, eq->irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ if (!eqe_count)
+ return false;
+
+ netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+ sq->channel->stats.eq_rearm++;
+ return true;
+}
+
static void mlx5e_tx_timeout(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool sched_work = false;
+ bool reopen_channels = false;
int i;
netdev_err(dev, "TX timeout detected\n");
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
- if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ if (!netif_xmit_stopped(dev_queue))
continue;
- sched_work = true;
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
- i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - dev_queue->trans_start));
+
+ /* If we recovered a lost interrupt, the TX timeout will most
+ * likely be resolved; skip reopening the channels.
+ */
+ if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ reopen_channels = true;
+ }
}
- if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
schedule_work(&priv->tx_timeout_work);
}
@@ -4044,9 +3937,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
- if (params->rx_am_enabled)
- params->rx_cq_moderation =
- mlx5e_am_get_def_profile(cq_period_mode);
+ if (params->rx_dim_enabled) {
+ switch (cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ }
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
@@ -4108,7 +4010,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
@@ -4315,9 +4217,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
-
- if (priv->channels.params.xdp_prog)
- bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3409d86eb06b..363d8dcb7f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
return 0;
}
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_sq *rep_sq, *tmp;
+ struct mlx5e_rep_priv *rpriv;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+ mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+ list_del(&rep_sq->list);
+ kfree(rep_sq);
+ }
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num)
+{
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_sq *rep_sq;
+ int err;
+ int i;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return 0;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ for (i = 0; i < sqns_num; i++) {
+ rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+ if (!rep_sq) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Add a re-inject rule to the PF/representor SQs */
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+ rep->vport,
+ sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ kfree(rep_sq);
+ goto out_err;
+ }
+ rep_sq->send_to_vport_rule = flow_rule;
+ list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+ }
+ return 0;
+
+out_err:
+ mlx5e_sqs2vport_stop(esw, rep);
+ return err;
+}
+
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
sqs[num_sqs++] = c->sq[tc].sqn;
}
- err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+ err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- mlx5_eswitch_sqs2vport_stop(esw, rep);
+ mlx5e_sqs2vport_stop(esw, rep);
}
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
#endif
unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
DELAY_PROBE_TIME);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
neigh_update.neigh_stats_work.work);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe;
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
neigh_update.netevent_nb);
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe = NULL;
struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ out_err:
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
unregister_netevent_notifier(&neigh_update->netevent_nb);
@@ -662,9 +719,6 @@ static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -682,7 +736,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -827,7 +881,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
@@ -906,7 +960,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err = PTR_ERR(flow_rule);
goto err_destroy_direct_tirs;
}
- rep->vport_rx_rule = flow_rule;
+ rpriv->vport_rx_rule = flow_rule;
err = mlx5e_tc_init(priv);
if (err)
@@ -915,7 +969,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return 0;
err_del_flow_rule:
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -926,10 +980,9 @@ err_destroy_direct_rqts:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
mlx5e_tc_cleanup(priv);
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
}
@@ -969,10 +1022,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
/* e-Switch vport representors */
static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
int err;
@@ -994,10 +1047,10 @@ err_remove_sqs:
}
static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -1010,8 +1063,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
}
static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
struct mlx5e_priv *upriv;
@@ -1021,7 +1075,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
if (!rpriv)
return -ENOMEM;
- netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+ netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
@@ -1029,8 +1083,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
- rep->netdev = netdev;
+ rpriv->netdev = netdev;
rpriv->rep = rep;
+ rep->rep_if[REP_ETH].priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
@@ -1046,7 +1102,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
upriv);
if (err)
@@ -1078,16 +1135,19 @@ err_destroy_netdev:
}
static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
void *ppriv = priv->ppriv;
struct mlx5e_priv *upriv;
- unregister_netdev(rep->netdev);
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ unregister_netdev(netdev);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+ REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
upriv);
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1102,18 +1162,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- u8 mac[ETH_ALEN];
-
- mlx5_query_nic_vport_mac_address(mdev, 0, mac);
for (vport = 1; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if = {};
- rep.load = mlx5e_vport_rep_load;
- rep.unload = mlx5e_vport_rep_unload;
- rep.vport = vport;
- ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+ rep_if.load = mlx5e_vport_rep_load;
+ rep_if.unload = mlx5e_vport_rep_unload;
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
}
@@ -1125,21 +1180,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
int vport;
for (vport = 1; vport < total_vfs; vport++)
- mlx5_eswitch_unregister_vport_rep(esw, vport);
+ mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}
void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if;
+ struct mlx5e_rep_priv *rpriv;
+
+ rpriv = priv->ppriv;
+ rpriv->netdev = priv->netdev;
- mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
- rep.load = mlx5e_nic_rep_load;
- rep.unload = mlx5e_nic_rep_unload;
- rep.vport = FDB_UPLINK_VPORT;
- rep.netdev = priv->netdev;
- mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+ rep_if.load = mlx5e_nic_rep_load;
+ rep_if.unload = mlx5e_nic_rep_unload;
+ rep_if.priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */
mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}
@@ -1150,7 +1208,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
- mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+ mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 5659ed9f51e6..b9b481f2833a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
+ struct net_device *netdev;
+ struct mlx5_flow_handle *vport_rx_rule;
+ struct list_head vport_sqs_list;
};
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+ return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
struct mlx5e_neigh {
struct net_device *dev;
union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
int encap_size;
};
+struct mlx5e_rep_sq {
+ struct mlx5_flow_handle *send_to_vport_rule;
+ struct list_head list;
+};
+
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7a698f..0d4bb0688faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
mlx5_cqwq_pop(&cq->wq);
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
- WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
- cqe->op_own);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
return;
}
@@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
}
if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
- WARN_ONCE(true,
- "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- icowi->opcode);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
}
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
@@ -632,7 +631,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (is_last_ethertype_ip(skb, &network_depth)) {
+ if (likely(is_last_ethertype_ip(skb, &network_depth))) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
if (network_depth > ETH_HLEN)
@@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
+ struct hwtstamp_config *tstamp;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
char *pseudo_header;
u32 qpn;
u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
return;
}
+ priv = mlx5i_epriv(netdev);
+ tstamp = &priv->tstamp;
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index b69a705fd787..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
- {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
- MLX5_AM_EQE_PROFILES,
- MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
- struct mlx5e_cq_moder cq_moder;
-
- cq_moder = profile[cq_period_mode][ix];
- cq_moder.cq_period_mode = cq_period_mode;
- return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
- int default_profile_ix;
-
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
- else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
- return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
- MLX5E_AM_START_MEASURE,
- MLX5E_AM_MEASURE_IN_PROGRESS,
- MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
- MLX5E_AM_PARKING_ON_TOP,
- MLX5E_AM_PARKING_TIRED,
- MLX5E_AM_GOING_RIGHT,
- MLX5E_AM_GOING_LEFT,
-};
-
-enum {
- MLX5E_AM_STATS_WORSE,
- MLX5E_AM_STATS_SAME,
- MLX5E_AM_STATS_BETTER,
-};
-
-enum {
- MLX5E_AM_STEPPED,
- MLX5E_AM_TOO_TIRED,
- MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- return true;
- case MLX5E_AM_GOING_RIGHT:
- return (am->steps_left > 1) && (am->steps_right == 1);
- default: /* MLX5E_AM_GOING_LEFT */
- return (am->steps_right > 1) && (am->steps_left == 1);
- }
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- am->tune_state = MLX5E_AM_GOING_LEFT;
- am->steps_left = 0;
- break;
- case MLX5E_AM_GOING_LEFT:
- am->tune_state = MLX5E_AM_GOING_RIGHT;
- am->steps_right = 0;
- break;
- }
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
- if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
- return MLX5E_AM_TOO_TIRED;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
- return MLX5E_AM_ON_EDGE;
- am->profile_ix++;
- am->steps_right++;
- break;
- case MLX5E_AM_GOING_LEFT:
- if (am->profile_ix == 0)
- return MLX5E_AM_ON_EDGE;
- am->profile_ix--;
- am->steps_left++;
- break;
- }
-
- am->tired++;
- return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tired = 0;
- am->tune_state = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tune_state = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
- am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
- MLX5E_AM_GOING_RIGHT;
- mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
- struct mlx5e_rx_am_stats *prev)
-{
- if (!prev->bpms)
- return curr->bpms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
- return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (!prev->ppms)
- return curr->ppms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
- return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
- if (!prev->epms)
- return MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
- return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
- struct mlx5e_rx_am *am)
-{
- int prev_state = am->tune_state;
- int prev_ix = am->profile_ix;
- int stats_res;
- int step_res;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_SAME)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_PARKING_TIRED:
- am->tired--;
- if (!am->tired)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_GOING_RIGHT:
- case MLX5E_AM_GOING_LEFT:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_BETTER)
- mlx5e_am_turn(am);
-
- if (mlx5e_am_on_top(am)) {
- mlx5e_am_park_on_top(am);
- break;
- }
-
- step_res = mlx5e_am_step(am);
- switch (step_res) {
- case MLX5E_AM_ON_EDGE:
- mlx5e_am_park_on_top(am);
- break;
- case MLX5E_AM_TOO_TIRED:
- mlx5e_am_park_tired(am);
- break;
- }
-
- break;
- }
-
- if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
- (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
- am->prev_stats = *curr_stats;
-
- return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
- struct mlx5e_rx_am_sample *s)
-{
- s->time = ktime_get();
- s->pkt_ctr = rq->stats.packets;
- s->byte_ctr = rq->stats.bytes;
- s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
- struct mlx5e_rx_am_sample *end,
- struct mlx5e_rx_am_stats *curr_stats)
-{
- /* u32 holds up to 71 minutes, should be enough */
- u32 delta_us = ktime_us_delta(end->time, start->time);
- u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
- u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
- start->byte_ctr);
-
- if (!delta_us)
- return;
-
- curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
- curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
- curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
- delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
- struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
- work);
- struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
- struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
-
- am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
- struct mlx5e_rx_am *am = &rq->am;
- struct mlx5e_rx_am_sample end_sample;
- struct mlx5e_rx_am_stats curr_stats;
- u16 nevents;
-
- switch (am->state) {
- case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
- am->start_sample.event_ctr);
- if (nevents < MLX5E_AM_NEVENTS)
- break;
- mlx5e_am_sample(rq, &end_sample);
- mlx5e_am_calc_stats(&am->start_sample, &end_sample,
- &curr_stats);
- if (mlx5e_am_decision(&curr_stats, am)) {
- am->state = MLX5E_AM_APPLY_NEW_PROFILE;
- schedule_work(&am->work);
- break;
- }
- /* fall through */
- case MLX5E_AM_START_MEASURE:
- mlx5e_am_sample(rq, &am->start_sample);
- am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
- break;
- case MLX5E_AM_APPLY_NEW_PROFILE:
- break;
- }
-}
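
The whole of en_rx_am.c goes away here because the adaptive-moderation algorithm is promoted into the generic net_dim library (see the en_txrx.c hunk further down, which switches the RX path to net_dim_sample()/net_dim()). One subtlety worth keeping from the deleted file is the BIT_GAP() arithmetic used for free-running counters; a minimal standalone sketch of the same wraparound-safe delta, with illustrative names:

#include <stdint.h>

/* Wraparound-safe delta of a free-running 16-bit event counter: unsigned
 * subtraction wraps modulo 2^16, which is what BIT_GAP(16, end, start)
 * computed with masks. Illustrative helper, not kernel API.
 */
static uint16_t counter_gap_u16(uint16_t end, uint16_t start)
{
	return (uint16_t)(end - start);
}
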
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b74ddc7984bc..5f0f3493d747 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -71,6 +71,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
@@ -99,6 +100,72 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_sw_stats temp, *s = &temp;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ struct mlx5e_ch_stats *ch_stats;
+ int i, j;
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
+ ch_stats = &c->stats;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_page_reuse += rq_stats->page_reuse;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
+ s->rx_cache_waive += rq_stats->cache_waive;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_xmit_more += sq_stats->xmit_more;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ s->tx_csum_none += sq_stats->csum_none;
+ s->tx_csum_partial += sq_stats->csum_partial;
+ }
+ }
+
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+ priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+ memcpy(&priv->stats.sw, s, sizeof(*s));
+}
+
static const struct counter_desc q_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
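
The new mlx5e_grp_sw_update_stats() folds every per-ring counter into a scratch struct and publishes the result with a single memcpy(), so the shared priv->stats.sw is never used as the accumulator while the long summation loop runs. The pattern in miniature, with invented types:

#include <stdint.h>
#include <string.h>

struct sw_snapshot { uint64_t rx_packets, rx_bytes; };

/* Accumulate per-ring counters into a scratch copy, then publish the
 * whole snapshot at once; same shape as the hunk above.
 */
static void publish_snapshot(struct sw_snapshot *shared,
			     const struct sw_snapshot *rings, int n)
{
	struct sw_snapshot tmp = { 0 };
	int i;

	for (i = 0; i < n; i++) {
		tmp.rx_packets += rings[i].rx_packets;
		tmp.rx_bytes   += rings[i].rx_bytes;
	}
	memcpy(shared, &tmp, sizeof(tmp));
}
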
@@ -128,6 +195,22 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ int err;
+
+ if (!priv->q_counter)
+ return;
+
+ err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+ if (err)
+ return;
+
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
{ "rx_vport_unicast_packets",
@@ -200,6 +283,19 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
#define PPORT_802_3_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
@@ -252,6 +348,20 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->IEEE_802_3_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
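
This PPCNT query shape — set local_port, pick a counter group, fire mlx5_core_access_reg() — recurs in the 2863, 2819, phy and eth_ext helpers below. Factored out, it would look roughly like this sketch (query_ppcnt_group is an invented name; the macros and the access_reg call are the real driver APIs used in the hunks):

/* Hedged sketch: one helper covering the repeated per-group PPCNT reads.
 * Only the group id and the destination buffer differ between callers.
 */
static void query_ppcnt_group(struct mlx5_core_dev *mdev, u8 grp, void *out)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, grp);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
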
@@ -289,6 +399,20 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2863_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2819_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
@@ -336,6 +460,20 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2819_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -376,6 +514,27 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->phy_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
+ return;
+
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -418,6 +577,23 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->eth_ext_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PCIE_PERF_OFF(c) \
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
@@ -505,6 +681,22 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+ return;
+
+ out = pcie_stats->pcie_perf_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+}
+
#define PPORT_PER_PRIO_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -656,6 +848,47 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
+static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
+ mlx5e_grp_per_prio_pfc_get_num_stats(priv);
+}
+
+static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
+ return idx;
+}
+
+static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int prio;
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ out = pstats->per_prio_counters[prio];
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+ mlx5_core_access_reg(mdev, in, sz, out, sz,
+ MLX5_REG_PPCNT, 0, 0);
+ }
+}
+
static const struct counter_desc mlx5e_pme_status_desc[] = {
{ "module_unplug", 8 },
};
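
The per-prio consolidation above works because every fill helper treats idx as a cursor: write at data[idx], return the next free slot. Merging the traffic and pfc groups then reduces to chained calls. A toy illustration with invented helpers:

typedef unsigned long long u64_t;	/* stand-in for the kernel's u64 */

static int fill_a(u64_t *data, int idx) { data[idx++] = 1; return idx; }
static int fill_b(u64_t *data, int idx) { data[idx++] = 2; return idx; }

/* Chaining keeps string and value layouts in lockstep, exactly as the
 * merged per-prio group does with its traffic and pfc sub-fills.
 */
static int fill_composite(u64_t *data, int idx)
{
	idx = fill_a(data, idx);
	idx = fill_b(data, idx);
	return idx;
}
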
@@ -723,6 +956,11 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
+static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_ipsec_update_stats(priv);
+}
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -767,12 +1005,18 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
+static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
+};
+
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_CH_STATS * priv->channels.num) +
(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
}
@@ -785,6 +1029,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ch_stats_desc[j].format, i);
+
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
@@ -808,6 +1057,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+ ch_stats_desc, j);
+
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
@@ -823,61 +1078,71 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+/* The stats groups are listed in the reverse order of their update_stats() calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
{
.get_num_stats = mlx5e_grp_sw_get_num_stats,
.fill_strings = mlx5e_grp_sw_fill_strings,
.fill_stats = mlx5e_grp_sw_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_sw_update_stats,
},
{
.get_num_stats = mlx5e_grp_q_get_num_stats,
.fill_strings = mlx5e_grp_q_fill_strings,
.fill_stats = mlx5e_grp_q_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_q_update_stats,
},
{
.get_num_stats = mlx5e_grp_vport_get_num_stats,
.fill_strings = mlx5e_grp_vport_fill_strings,
.fill_stats = mlx5e_grp_vport_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_vport_update_stats,
},
{
.get_num_stats = mlx5e_grp_802_3_get_num_stats,
.fill_strings = mlx5e_grp_802_3_fill_strings,
.fill_stats = mlx5e_grp_802_3_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_802_3_update_stats,
},
{
.get_num_stats = mlx5e_grp_2863_get_num_stats,
.fill_strings = mlx5e_grp_2863_fill_strings,
.fill_stats = mlx5e_grp_2863_fill_stats,
+ .update_stats = mlx5e_grp_2863_update_stats,
},
{
.get_num_stats = mlx5e_grp_2819_get_num_stats,
.fill_strings = mlx5e_grp_2819_fill_strings,
.fill_stats = mlx5e_grp_2819_fill_stats,
+ .update_stats = mlx5e_grp_2819_update_stats,
},
{
.get_num_stats = mlx5e_grp_phy_get_num_stats,
.fill_strings = mlx5e_grp_phy_fill_strings,
.fill_stats = mlx5e_grp_phy_fill_stats,
+ .update_stats = mlx5e_grp_phy_update_stats,
},
{
.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
.fill_strings = mlx5e_grp_eth_ext_fill_strings,
.fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ .update_stats = mlx5e_grp_eth_ext_update_stats,
},
{
.get_num_stats = mlx5e_grp_pcie_get_num_stats,
.fill_strings = mlx5e_grp_pcie_fill_strings,
.fill_stats = mlx5e_grp_pcie_fill_stats,
+ .update_stats = mlx5e_grp_pcie_update_stats,
},
{
- .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_fill_stats,
+ .update_stats = mlx5e_grp_per_prio_update_stats,
},
{
.get_num_stats = mlx5e_grp_pme_get_num_stats,
@@ -888,6 +1153,7 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
.fill_strings = mlx5e_grp_ipsec_fill_strings,
.fill_stats = mlx5e_grp_ipsec_fill_stats,
+ .update_stats = mlx5e_grp_ipsec_update_stats,
},
{
.get_num_stats = mlx5e_grp_channels_get_num_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index d679e21f686e..0b3320a2b072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,7 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
char format[ETH_GSTRING_LEN];
@@ -88,6 +89,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 ch_eq_rearm;
/* Special handling counters */
u64 link_down_events_phy;
@@ -192,6 +194,10 @@ struct mlx5e_sq_stats {
u64 dropped;
};
+struct mlx5e_ch_stats {
+ u64 eq_rearm;
+};
+
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -201,11 +207,17 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
+enum {
+ MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
struct mlx5e_priv;
struct mlx5e_stats_grp {
+ u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*update_stats)(struct mlx5e_priv *priv);
};
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
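
With .update_stats and .update_stats_mask in place, a dispatcher can refresh every group for ethtool but only the MLX5E_NDO_UPDATE_STATS-flagged ones from the hot ndo_get_stats64 path. Given the en_stats.c comment that the array is listed in the reverse order of the calls, a plausible dispatcher (a sketch, not the literal driver loop) walks it backwards:

/* Sketch under stated assumptions: ngrps and the masking policy are
 * inferred from the new fields, not copied from the driver.
 */
static void stats_grps_update(struct mlx5e_priv *priv, int ngrps, u16 mask)
{
	int i;

	for (i = ngrps - 1; i >= 0; i--) {
		const struct mlx5e_stats_grp *grp = &mlx5e_stats_grps[i];

		if (!grp->update_stats)
			continue;
		if (mask && !(grp->update_stats_mask & mask))
			continue;
		grp->update_stats(priv);
	}
}
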
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 55979ec2e88a..fd98b0dc610f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,17 +51,22 @@
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
+#include "fs_core.h"
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
+ u32 hairpin_tirn;
+ struct mlx5_flow_table *hairpin_ft;
};
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(3),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
};
struct mlx5e_tc_flow {
@@ -71,6 +76,7 @@ struct mlx5e_tc_flow {
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct list_head hairpin; /* flows sharing the same hairpin */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +99,32 @@ enum {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+struct mlx5e_hairpin {
+ struct mlx5_hairpin *pair;
+
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5e_priv *func_priv;
+ u32 tdn;
+ u32 tirn;
+
+ int num_channels;
+ struct mlx5e_rqt indir_rqt;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table ttc;
+};
+
+struct mlx5e_hairpin_entry {
+ /* a node of a hash table which keeps all the hairpin entries */
+ struct hlist_node hairpin_hlist;
+
+ /* flows sharing the same hairpin */
+ struct list_head flows;
+
+ u16 peer_vhca_id;
+ u8 prio;
+ struct mlx5e_hairpin *hp;
+};
+
struct mod_hdr_key {
int num_actions;
void *actions;
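
Hairpin entries are shared: flows with the same (peer vhca_id, PCP priority) key attach to one mlx5e_hairpin_entry, whose flows list doubles as a reference count — the entry is torn down when the list drains (see mlx5e_hairpin_flow_del() further down). The key packing is simply:

/* Same packing as hash_hairpin_info() in this patch: vhca id in the
 * high 16 bits, VLAN PCP priority in the low bits.
 */
static u32 hairpin_key(u16 peer_vhca_id, u8 prio)
{
	return ((u32)peer_vhca_id << 16) | prio;
}
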
@@ -222,6 +254,417 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
}
}
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+
+ netdev = __dev_get_by_index(net, ifindex);
+ priv = netdev_priv(netdev);
+ return priv->mdev;
+}
+
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ void *tirc;
+ int err;
+
+ err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+ if (err)
+ goto alloc_tdn_err;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ if (err)
+ goto create_tir_err;
+
+ return 0;
+
+create_tir_err:
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+ mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+{
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+ struct mlx5e_priv *priv = hp->func_priv;
+ int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+
+ mlx5e_build_default_indir_rqt(indirection_rqt, sz,
+ hp->num_channels);
+
+ for (i = 0; i < sz; i++) {
+ ix = i;
+ if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, ilog2(sz));
+ ix = indirection_rqt[ix];
+ rqn = hp->pair->rqn[ix];
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+ }
+}
+
+static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
+{
+ int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqtc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+
+ err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
+ if (!err)
+ hp->indir_rqt.enabled = true;
+
+ kvfree(in);
+ return err;
+}
+
+static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)];
+ int tt, i, err;
+ void *tirc;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in,
+ MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+ if (err) {
+ mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+ }
+ return 0;
+
+err_destroy_tirs:
+ for (i = 0; i < tt; i++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
+}
+
+static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+ int tt;
+
+ memset(ttc_params, 0, sizeof(*ttc_params));
+
+ ttc_params->any_tt_tirn = hp->tirn;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
+
+ ft_attr->max_fte = MLX5E_NUM_TT;
+ ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_TC_PRIO;
+}
+
+static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct ttc_params ttc_params;
+ int err;
+
+ err = mlx5e_hairpin_create_indirect_rqt(hp);
+ if (err)
+ return err;
+
+ err = mlx5e_hairpin_create_indirect_tirs(hp);
+ if (err)
+ goto err_create_indirect_tirs;
+
+ mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
+ if (err)
+ goto err_create_ttc_table;
+
+ netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
+ hp->num_channels, hp->ttc.ft.t->id);
+
+ return 0;
+
+err_create_ttc_table:
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+err_create_indirect_tirs:
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+
+ return err;
+}
+
+static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+
+ mlx5e_destroy_ttc_table(priv, &hp->ttc);
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+ int peer_ifindex)
+{
+ struct mlx5_core_dev *func_mdev, *peer_mdev;
+ struct mlx5e_hairpin *hp;
+ struct mlx5_hairpin *pair;
+ int err;
+
+ hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ func_mdev = priv->mdev;
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+ pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+ if (IS_ERR(pair)) {
+ err = PTR_ERR(pair);
+ goto create_pair_err;
+ }
+ hp->pair = pair;
+ hp->func_mdev = func_mdev;
+ hp->func_priv = priv;
+ hp->num_channels = params->num_channels;
+
+ err = mlx5e_hairpin_create_transport(hp);
+ if (err)
+ goto create_transport_err;
+
+ if (hp->num_channels > 1) {
+ err = mlx5e_hairpin_rss_init(hp);
+ if (err)
+ goto rss_init_err;
+ }
+
+ return hp;
+
+rss_init_err:
+ mlx5e_hairpin_destroy_transport(hp);
+create_transport_err:
+ mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+ if (hp->num_channels > 1)
+ mlx5e_hairpin_rss_cleanup(hp);
+ mlx5e_hairpin_destroy_transport(hp);
+ mlx5_core_hairpin_destroy(hp->pair);
+ kvfree(hp);
+}
+
+static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
+{
+ return (peer_vhca_id << 16 | prio);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+ u16 peer_vhca_id, u8 prio)
+{
+ struct mlx5e_hairpin_entry *hpe;
+ u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
+
+ hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hairpin_hlist, hash_key) {
+ if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ return hpe;
+ }
+
+ return NULL;
+}
+
+#define UNKNOWN_MATCH_PRIO 8
+
+static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec, u8 *match_prio)
+{
+ void *headers_c, *headers_v;
+ u8 prio_val, prio_mask = 0;
+ bool vlan_present;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
+ netdev_warn(priv->netdev,
+ "only PCP trust state supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+
+ vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
+ if (vlan_present) {
+ prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ }
+
+ if (!vlan_present || !prio_mask) {
+ prio_val = UNKNOWN_MATCH_PRIO;
+ } else if (prio_mask != 0x7) {
+ netdev_warn(priv->netdev,
+ "masked priority match not supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+
+ *match_prio = prio_val;
+ return 0;
+}
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int peer_ifindex = parse_attr->mirred_ifindex;
+ struct mlx5_hairpin_params params;
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin *hp;
+ u64 link_speed64;
+ u32 link_speed;
+ u8 match_prio;
+ u16 peer_id;
+ int err;
+
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
+ netdev_warn(priv->netdev, "hairpin is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+ err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+ if (err)
+ return err;
+ hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
+ if (hpe)
+ goto attach_flow;
+
+ hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+ if (!hpe)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hpe->flows);
+ hpe->peer_vhca_id = peer_id;
+ hpe->prio = match_prio;
+
+ params.log_data_size = 15;
+ params.log_data_size = min_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+ params.log_data_size = max_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+
+ params.log_num_packets = params.log_data_size -
+ MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
+ params.log_num_packets = min_t(u8, params.log_num_packets,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
+
+ params.q_counter = priv->q_counter;
+	/* set one hairpin channel per 50Gb/s share of the link speed */
+ mlx5e_get_max_linkspeed(priv->mdev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+ params.num_channels = link_speed64;
+
+ hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ if (IS_ERR(hp)) {
+ err = PTR_ERR(hp);
+ goto create_hairpin_err;
+ }
+
+ netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
+ hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
+ hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
+
+ hpe->hp = hp;
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_hairpin_info(peer_id, match_prio));
+
+attach_flow:
+ if (hpe->hp->num_channels > 1) {
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ } else {
+ flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ }
+ list_add(&flow->hairpin, &hpe->flows);
+
+ return 0;
+
+create_hairpin_err:
+ kfree(hpe);
+ return err;
+}
+
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->hairpin.next;
+
+ list_del(&flow->hairpin);
+
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (list_empty(next)) {
+ struct mlx5e_hairpin_entry *hpe;
+
+ hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ hpe->hp->pair->peer_mdev->priv.name);
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ hash_del(&hpe->hairpin_hlist);
+ kfree(hpe);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
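
A worked example of the channel sizing in mlx5e_hairpin_flow_add() above: the pair gets one RQ/SQ channel per 50Gb/s share of the maximum link speed, floored at one. In isolation:

/* Mirrors the max_t()/do_div() arithmetic above; link_speed is in Mb/s
 * as reported by mlx5e_get_max_linkspeed(). E.g. 100000 -> 2 channels.
 */
static unsigned int hairpin_num_channels(u32 link_speed)
{
	u64 speed = link_speed > 50000 ? link_speed : 50000;

	return (unsigned int)(speed / 50000);
}
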
@@ -229,7 +672,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flow_tag = attr->flow_tag,
@@ -238,18 +681,37 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
- int err;
+ int err, dest_ix = 0;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.vlan.ft.t;
- } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_hairpin_flow;
+ }
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = attr->hairpin_ft;
+ } else {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = attr->hairpin_tirn;
+ }
+ dest_ix++;
+ } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ dest_ix++;
+ }
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter = counter;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_fc_create;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter = counter;
+ dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -279,7 +741,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_PRIO,
tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
- 0, 0);
+ MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
@@ -292,7 +754,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, &dest, 1);
+ &flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_add_rule;
@@ -309,7 +771,10 @@ err_create_ft:
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
return rule;
}
@@ -330,6 +795,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -617,7 +1085,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
@@ -1421,6 +1890,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *fmdev, *pmdev;
+ u16 func_id, peer_id;
+
+ fmdev = priv->mdev;
+ pmdev = peer_priv->mdev;
+
+ func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+ peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+ return (func_id == peer_id);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
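
same_hw_devs() approves hairpin only between functions of one physical adapter, keying on PCI bus number plus device slot — so two PFs of the same NIC match while different NICs do not. The identity in isolation:

/* Illustrative extraction of the key built in same_hw_devs(): bus in
 * the high byte, PCI slot in the low byte.
 */
static u16 pci_func_key(u8 bus, u8 slot)
{
	return ((u16)bus << 8) | slot;
}
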
@@ -1465,6 +1948,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ if (is_tcf_mirred_egress_redirect(a)) {
+ struct net_device *peer_dev = tcf_mirred_dev(a);
+
+ if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+ same_hw_devs(priv, netdev_priv(peer_dev))) {
+ parse_attr->mirred_ifindex = peer_dev->ifindex;
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+ peer_dev->name);
+ return -EINVAL;
+ }
+ continue;
+ }
+
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
@@ -1507,6 +2007,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *uplink_rpriv;
struct rtable *rt;
struct neighbour *n = NULL;
@@ -1520,9 +2021,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
#else
return -EOPNOTSUPP;
#endif
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = rt->dst.dev;
@@ -1547,6 +2049,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct dst_entry *dst;
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
@@ -1557,9 +2060,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
*out_ttl = ip6_dst_hoplimit(dst);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = dst->dev;
#else
@@ -1859,7 +2363,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+ REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -1982,11 +2488,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+ out_dev = tcf_mirred_dev(a);
if (switchdev_port_same_parent_id(priv->netdev,
out_dev)) {
@@ -1996,7 +2501,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
- parse_attr->mirred_ifindex = ifindex;
+ parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
@@ -2182,6 +2687,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
+ hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298eafc3..f292bb346985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
- mlx5e_rx_am(&c->rq);
+ if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+ struct net_dim_sample dim_sample;
+ net_dim_sample(c->rq.cq.event_ctr,
+ c->rq.stats.packets,
+ c->rq.stats.bytes,
+ &dim_sample);
+ net_dim(&c->rq.dim, dim_sample);
+ }
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
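
This is the consumer side of the en_rx_am.c removal: per NAPI poll the driver snapshots the free-running event/packet/byte counters into a net_dim_sample and lets the shared library run the tune state machine. The sampler is expected to be the moral equivalent of the deleted mlx5e_am_sample(); a sketch of the assumed shape, not a quote of the library header:

/* Assumed shape of the generic sampler; mirrors the removed
 * mlx5e_am_sample() field for field.
 */
static inline void dim_sample(u16 event_ctr, u64 packets, u64 bytes,
			      struct net_dim_sample *s)
{
	s->time      = ktime_get();
	s->pkt_ctr   = packets;
	s->byte_ctr  = bytes;
	s->event_ctr = event_ctr;
}
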
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 14d57828945d..05c1157bc9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -534,6 +534,24 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+/* Some architectures don't latch interrupts when they are disabled, so
+ * polling via mlx5_eq_poll_irq_disabled() can itself lose interrupts in
+ * the very window it uses to recover them. Avoid it except as a last
+ * resort.
+ */
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
+{
+ u32 count_eqe;
+
+ disable_irq(eq->irqn);
+ count_eqe = eq->cons_index;
+ mlx5_eq_int(eq->irqn, eq);
+ count_eqe = eq->cons_index - count_eqe;
+ enable_irq(eq->irqn);
+
+ return count_eqe;
+}
+
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
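
mlx5_eq_poll_irq_disabled() exists for interrupt-loss recovery: mask the line, run the EQ handler by hand, and report how many EQEs were flushed. A hypothetical caller (illustrative only) treats a non-zero return as a recovered stall — which appears to be what the new ch%d_eq_rearm ethtool counter accounts for:

/* Hypothetical last-resort recovery helper built on the new export;
 * the surrounding policy (when to invoke it) is assumed, not shown.
 */
static bool eq_recover_by_polling(struct mlx5_eq *eq)
{
	u32 eqes = mlx5_eq_poll_irq_disabled(eq);

	return eqes != 0;	/* true if polling flushed pending EQEs */
}
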
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb140f517c4..5ecf2cddc16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
@@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
u8 *smac_v;
@@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1208,8 +1224,12 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &tsar_ctx,
+ tsar_ctx,
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
if (vport->qos.enabled)
return -EEXIST;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
initial_max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
&vport->qos.esw_tsar_ix);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
if (!vport->qos.enabled)
return -EIO;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
vport->qos.esw_tsar_ix,
bitmask);
if (err) {
@@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
}
}
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+ vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->ingress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ vport->ingress.drop_counter = NULL;
+ }
+ }
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+ vport->egress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->egress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure egress drop rule counter failed\n",
+ vport->vport);
+ vport->egress.drop_counter = NULL;
+ }
+ }
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (vport->ingress.drop_counter)
+ mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+ if (vport->egress.drop_counter)
+ mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
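
Note the failure policy in esw_vport_create_drop_counters(): a counter that cannot be allocated is logged and reset to NULL rather than failing vport bring-up, and the ACL configuration above attaches the counter destination only when the pointer is non-NULL. The pattern in miniature:

/* Optional-resource pattern: degrade to "no drop statistics" instead
 * of failing the vport; mlx5_fc_create() is the real driver API.
 */
static struct mlx5_fc *try_create_counter(struct mlx5_core_dev *dev)
{
	struct mlx5_fc *fc = mlx5_fc_create(dev, false);

	return IS_ERR(fc) ? NULL : fc;
}
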
@@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
+ /* create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_destroy_drop_counters(vport);
}
esw->enabled_vports--;
mutex_unlock(&esw->state_lock);
@@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
- esw->offloads.vport_reps =
- kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
- GFP_KERNEL);
- if (!esw->offloads.vport_reps) {
- err = -ENOMEM;
+ err = esw_offloads_init_reps(esw);
+ if (err)
goto abort;
- }
hash_init(esw->offloads.encap_tbl);
hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
- kfree(esw->offloads.vport_reps);
kfree(esw);
return err;
}
@@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
- kfree(esw->offloads.vport_reps);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
kfree(esw);
}
@@ -2018,12 +2083,36 @@ unlock:
return err;
}
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 bytes = 0;
+ u16 idx = 0;
+
+ if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+ return;
+
+ if (vport->egress.drop_counter) {
+ idx = vport->egress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+ }
+
+ if (vport->ingress.drop_counter) {
+ idx = vport->ingress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+ }
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
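
One direction flip is easy to misread in the hunk above: the e-switch ingress ACL filters traffic the VF transmits and the egress ACL filters traffic the VF receives, so the ingress drop counter feeds vf_stats->tx_dropped while the egress one feeds vf_stats->rx_dropped — exactly as mlx5_eswitch_query_vport_drop_stats() assigns them.
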
@@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ vf_stats->rx_dropped = stats.rx_dropped;
+ vf_stats->tx_dropped = stats.tx_dropped;
+
free_out:
kvfree(out);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 565c8b7a399a..2fa037066b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
SRIOV_OFFLOADS
};
+enum {
+ REP_ETH,
+ NUM_REP_TYPES,
+};
+
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -68,6 +73,7 @@ struct vport_ingress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
};
struct vport_egress {
@@ -76,6 +82,12 @@ struct vport_egress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+ u64 rx_dropped;
+ u64 tx_dropped;
};
struct mlx5_vport_info {
@@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb {
};
};
-struct mlx5_esw_sq {
- struct mlx5_flow_handle *send_to_vport_rule;
- struct list_head list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+ int (*load)(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *priv;
+ bool valid;
};
struct mlx5_eswitch_rep {
- int (*load)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
- void (*unload)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
u16 vport;
u8 hw_id[ETH_ALEN];
- struct net_device *netdev;
-
- struct mlx5_flow_handle *vport_rx_rule;
- struct list_head vport_sqs_list;
u16 vlan;
u32 vlan_refcount;
- bool valid;
};
struct mlx5_esw_offload {
@@ -197,6 +205,8 @@ struct mlx5_eswitch {
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+ u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
@@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr {
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if *rep_if,
+ u8 rep_type);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+ int vport_index,
+ u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr);
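
Representor registration is now keyed by type (REP_ETH for now), with load/unload moved into the per-type mlx5_eswitch_rep_if. A minimal sketch of a consumer under the new signatures; my_rep_load()/my_rep_unload() are illustrative stubs:

#include "eswitch.h"

static int my_rep_load(struct mlx5_core_dev *dev,
		       struct mlx5_eswitch_rep *rep)
{
	/* create the representor netdev, stash state in rep_if priv, ... */
	return 0;
}

static void my_rep_unload(struct mlx5_eswitch_rep *rep)
{
	/* tear down whatever my_rep_load() set up */
}

static void my_register_reps(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep_if rep_if = {};
	int vport;

	rep_if.load = my_rep_load;
	rep_if.unload = my_rep_unload;
	rep_if.priv = NULL;

	for (vport = 0; vport < nvports; vport++)
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
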
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1143d80119bd..99f583a15cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (!rep->valid)
+ if (!rep->rep_if[REP_ETH].valid)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ out:
return err;
}
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ out:
return flow_rule;
}
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
-{
- struct mlx5_esw_sq *esw_sq, *tmp;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return;
-
- list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
- mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
- list_del(&esw_sq->list);
- kfree(esw_sq);
- }
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
- struct mlx5_flow_handle *flow_rule;
- struct mlx5_esw_sq *esw_sq;
- int err;
- int i;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return 0;
-
- for (i = 0; i < sqns_num; i++) {
- esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
- if (!esw_sq) {
- err = -ENOMEM;
- goto out_err;
- }
-
- /* Add re-inject rule to the PF/representor sqs */
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- rep->vport,
- sqns_array[i]);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- kfree(esw_sq);
- goto out_err;
- }
- esw_sq->send_to_vport_rule = flow_rule;
- list_add(&esw_sq->list, &rep->vport_sqs_list);
- }
- return 0;
-
-out_err:
- mlx5_eswitch_sqs2vport_stop(esw, rep);
- return err;
+ mlx5_del_flow_rules(rule);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
return err;
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+ kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+ int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_esw_offload *offloads;
+ struct mlx5_eswitch_rep *rep;
+ u8 hw_id[ETH_ALEN];
+ int vport;
+
+ esw->offloads.vport_reps = kcalloc(total_vfs,
+ sizeof(struct mlx5_eswitch_rep),
+ GFP_KERNEL);
+ if (!esw->offloads.vport_reps)
+ return -ENOMEM;
+
+ offloads = &esw->offloads;
+ mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+ for (vport = 0; vport < total_vfs; vport++) {
+ rep = &offloads->vport_reps[vport];
+
+ rep->vport = vport;
+ ether_addr_copy(rep->hw_id, hw_id);
+ }
+
+ offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+ return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+ for (vport = nvports - 1; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ rep->rep_if[rep_type].unload(rep);
+ }
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = NUM_REP_TYPES;
+
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int vport;
int err;
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (err)
+ goto err_reps;
+ }
+
+ return 0;
+
+err_reps:
+ esw_offloads_unload_reps_type(esw, vport, rep_type);
+ return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = 0;
+ int err;
+
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+ if (err)
+ goto err_reps;
+ }
+
+ return err;
+
+err_reps:
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+ return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ int err;
+
/* disable PF RoCE so missed packets don't go through RoCE steering */
mlx5_dev_list_lock();
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto create_fg_err;
- for (vport = 0; vport < nvports; vport++) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
-
- err = rep->load(esw, rep);
- if (err)
- goto err_reps;
- }
+ err = esw_offloads_load_reps(esw, nvports);
+ if (err)
+ goto err_reps;
return 0;
err_reps:
- for (vport--; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
esw_destroy_vport_rx_group(esw);
create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
- struct mlx5_eswitch_rep *rep;
- int vport;
-
- for (vport = nvports - 1; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
-
+ esw_offloads_unload_reps(esw, nvports);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *__rep)
+ struct mlx5_eswitch_rep_if *__rep_if,
+ u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
- struct mlx5_eswitch_rep *rep;
-
- rep = &offloads->vport_reps[vport_index];
+ struct mlx5_eswitch_rep_if *rep_if;
- memset(rep, 0, sizeof(*rep));
+ rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
- rep->load = __rep->load;
- rep->unload = __rep->unload;
- rep->vport = __rep->vport;
- rep->netdev = __rep->netdev;
- ether_addr_copy(rep->hw_id, __rep->hw_id);
+ rep_if->load = __rep_if->load;
+ rep_if->unload = __rep_if->unload;
+ rep_if->priv = __rep_if->priv;
- INIT_LIST_HEAD(&rep->vport_sqs_list);
- rep->valid = true;
+ rep_if->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index)
+ int vport_index, u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep = &offloads->vport_reps[vport_index];
if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
- rep->unload(esw, rep);
+ rep->rep_if[rep_type].unload(rep);
- rep->valid = false;
+ rep->rep_if[rep_type].valid = false;
}
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
rep = &offloads->vport_reps[UPLINK_REP_INDEX];
- return rep->netdev;
+ return rep->rep_if[rep_type].priv;
}
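
With mlx5_eswitch_sqs2vport_start/stop() removed, the send-to-vport rules are created one at a time and owned by the caller, which keeps the returned flow handles. A sketch of the replacement pattern, assuming the in-tree caller lives on the representor side; my_add_sq_rules() is hypothetical:

#include <linux/err.h>
#include "eswitch.h"

/* Sketch only: rules[] sizing and lifetime are the caller's problem. */
static int my_add_sq_rules(struct mlx5_eswitch *esw, int vport,
			   u16 *sqns, int num,
			   struct mlx5_flow_handle **rules)
{
	int err, i;

	for (i = 0; i < num; i++) {
		rules[i] = mlx5_eswitch_add_send_to_vport_rule(esw, vport,
							       sqns[i]);
		if (IS_ERR(rules[i])) {
			err = PTR_ERR(rules[i]);
			goto err_rules;
		}
	}
	return 0;

err_rules:
	while (--i >= 0)
		mlx5_eswitch_del_send_to_vport_rule(rules[i]);
	return err;
}
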
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index dfaad9ecb2b8..c025c98700e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -89,6 +89,9 @@
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+#define KERNEL_NIC_TC_NUM_PRIOS 1
+#define KERNEL_NIC_TC_NUM_LEVELS 2
+
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
@@ -134,7 +137,7 @@ static struct init_tree_node {
ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
ETHTOOL_PRIO_NUM_LEVELS))),
ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+ ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
@@ -2026,16 +2029,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &steering->fdb_root_ns->ns;
else
return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (steering->esw_egress_root_ns)
- return &steering->esw_egress_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (steering->esw_ingress_root_ns)
- return &steering->esw_ingress_root_ns->ns;
- else
- return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2059,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+ return NULL;
+
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (steering->esw_egress_root_ns &&
+ steering->esw_egress_root_ns[vport])
+ return &steering->esw_egress_root_ns[vport]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (steering->esw_ingress_root_ns &&
+ steering->esw_ingress_root_ns[vport])
+ return &steering->esw_ingress_root_ns[vport]->ns;
+ else
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio, int num_levels)
{
@@ -2343,13 +2363,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_egress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_ingress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
+}
+
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->esw_egress_root_ns);
- cleanup_root_ns(steering->esw_ingress_root_ns);
+ cleanup_egress_acls_root_ns(dev);
+ cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2466,86 @@ out_err:
return PTR_ERR(prio);
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns)
+ steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns[vport])
return -ENOMEM;
/* create 1 prio */

- prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns)
+ steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns[vport])
return -ENOMEM;
/* create 1 prio */
- prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_egress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_egress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_egress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+ kfree(steering->esw_egress_root_ns);
+ return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_ingress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_ingress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_ingress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ kfree(steering->esw_ingress_root_ns);
+ return err;
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
@@ -2488,12 +2588,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(steering);
+ err = init_egress_acls_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(steering);
+ err = init_ingress_acls_root_ns(dev);
if (err)
goto err;
}
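
ESW ACL namespaces moved out of mlx5_get_flow_namespace() and are now fetched per vport. A sketch of the lookup under the new helper, assuming it is declared alongside mlx5_get_flow_namespace() in linux/mlx5/fs.h; my_vport_acl_ns() is hypothetical:

#include <linux/mlx5/fs.h>

/* Sketch only: returns NULL when ACL steering is unsupported or the
 * vport index is out of range, mirroring the checks in fs_core.c.
 */
static struct mlx5_flow_namespace *
my_vport_acl_ns(struct mlx5_core_dev *dev, int vport, bool ingress)
{
	return mlx5_get_flow_vport_acl_namespace(dev,
						 ingress ?
						 MLX5_FLOW_NAMESPACE_ESW_INGRESS :
						 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						 vport);
}
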
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 397d24a621a4..05262708f14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
- struct mlx5_flow_root_namespace *esw_egress_root_ns;
- struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+ struct mlx5_flow_root_namespace **esw_egress_root_ns;
+ struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
};
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f8650033..b7ab929d5f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
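
mlx5_fc_query() is a thin wrapper over the counter query command, so unlike mlx5_fc_query_cached() it reaches firmware on every call; the vport drop-stats path above uses it on demand rather than via the bulk-polled cache. A sketch, assuming 'counter' is a live struct mlx5_fc; my_read_drop_counter() is hypothetical:

#include "mlx5_core.h"
#include "fs_core.h"

/* Sketch only: issues a firmware query for a single counter. */
static void my_read_drop_counter(struct mlx5_core_dev *dev,
				 struct mlx5_fc *counter)
{
	u64 packets = 0, bytes = 0;

	if (mlx5_fc_query(dev, counter->id, &packets, &bytes))
		return;

	mlx5_core_dbg(dev, "drops: %llu packets, %llu bytes\n",
		      packets, bytes);
}
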
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9219c8..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
.get_drvinfo = mlx5i_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mlx5i_get_ts_info,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index a281d95ce17c..f953378bd13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
@@ -242,7 +241,8 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -257,14 +257,23 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -274,7 +283,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -283,8 +292,8 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
}
@@ -398,7 +407,7 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -486,7 +495,7 @@ static int mlx5i_close(struct net_device *netdev)
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
- mlx5e_close_channels(&epriv->channels);;
+ mlx5e_close_channels(&epriv->channels);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 49008022c306..6d9053bcbe95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
/* Get the net-device corresponding to the given underlay QPN */
struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
-/* Shared ndo functionts */
+/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02cc979b..b69e9d847a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_init = mlx5i_pkey_dev_init,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
+ .ndo_do_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return mlx5i_ioctl(dev, ifr, cmd);
+}
+
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b05868728da7..394552f36fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -116,6 +116,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7a9ffd..9e38343a951f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,217 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(rqc, rqc, hairpin, 1);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(sqc, sqc, hairpin, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+ struct mlx5_hairpin_params *params)
+{
+ int i, j, err;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]);
+ if (err)
+ goto out_err_rq;
+ }
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]);
+ if (err)
+ goto out_err_sq;
+ }
+
+ return 0;
+
+out_err_sq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);
+ i = hp->num_channels;
+out_err_rq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]);
+ return err;
+}
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ }
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_sq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ void *rqc;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+ MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ return mlx5_core_modify_rq(func_mdev, rqn,
+ in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_rq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+ MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ return mlx5_core_modify_sq(peer_mdev, sqn,
+ in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+ int i, j, err;
+
+ /* set peer SQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i],
+ MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]);
+ if (err)
+ goto err_modify_sq;
+ }
+
+ /* set func RQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i],
+ MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]);
+ if (err)
+ goto err_modify_rq;
+ }
+
+ return 0;
+
+err_modify_rq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+ i = hp->num_channels;
+err_modify_sq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+ return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ /* unset func RQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+
+ /* unset peer SQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params)
+{
+ struct mlx5_hairpin *hp;
+ int size, err;
+
+ size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32);
+ hp = kzalloc(size, GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ hp->func_mdev = func_mdev;
+ hp->peer_mdev = peer_mdev;
+ hp->num_channels = params->num_channels;
+
+ hp->rqn = (void *)hp + sizeof(*hp);
+ hp->sqn = hp->rqn + params->num_channels;
+
+ /* alloc and pair func --> peer hairpin */
+ err = mlx5_hairpin_create_queues(hp, params);
+ if (err)
+ goto err_create_queues;
+
+ err = mlx5_hairpin_pair_queues(hp);
+ if (err)
+ goto err_pair_queues;
+
+ return hp;
+
+err_pair_queues:
+ mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+ mlx5_hairpin_unpair_queues(hp);
+ mlx5_hairpin_destroy_queues(hp);
+ kfree(hp);
+}
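
A usage sketch for the new hairpin object, assuming the struct and prototypes are exported via linux/mlx5/transobj.h; the parameter values below are illustrative placeholders, not what the TC offload code will pick:

#include <linux/mlx5/transobj.h>

static struct mlx5_hairpin *
my_hairpin_setup(struct mlx5_core_dev *func_mdev,
		 struct mlx5_core_dev *peer_mdev)
{
	struct mlx5_hairpin_params params = {};

	params.log_data_size = 16;	/* 64KB hairpin buffer (illustrative) */
	params.log_num_packets = 7;
	params.q_counter = 0;
	params.num_channels = 1;

	/* creates RQs/SQs on both functions and pairs them RST -> RDY */
	return mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
}

Teardown is symmetric: mlx5_core_hairpin_destroy() unpairs the queues (RDY -> RST) before destroying them.
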
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f3315bc874ad..3529b545675d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -113,6 +113,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ bool reload_fail;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
+ int err;
+
+ if (!mlxsw_bus->reset)
+ return -EOPNOTSUPP;
+
+ mlxsw_core_bus_device_unregister(mlxsw_core, true);
+ mlxsw_bus->reset(mlxsw_core->bus_priv);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink);
+ if (err)
+ mlxsw_core->reload_fail = true;
+ return err;
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
@@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv)
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct devlink *devlink;
size_t alloc_size;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
if (!mlxsw_driver)
return -EINVAL;
- alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
- if (!devlink) {
- err = -ENOMEM;
- goto err_devlink_alloc;
+
+ if (!reload) {
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ if (!devlink) {
+ err = -ENOMEM;
+ goto err_devlink_alloc;
+ }
}
mlxsw_core = devlink_priv(devlink);
@@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_bus_init;
+ if (mlxsw_driver->resources_register && !reload) {
+ err = mlxsw_driver->resources_register(mlxsw_core);
+ if (err)
+ goto err_register_resources;
+ }
+
err = mlxsw_ports_init(mlxsw_core);
if (err)
goto err_ports_init;
@@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- err = devlink_register(devlink, mlxsw_bus_info->dev);
- if (err)
- goto err_devlink_register;
+ if (!reload) {
+ err = devlink_register(devlink, mlxsw_bus_info->dev);
+ if (err)
+ goto err_devlink_register;
+ }
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
@@ -1057,7 +1090,8 @@ err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -1067,26 +1101,40 @@ err_alloc_lag_mapping:
err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- devlink_free(devlink);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
+err_register_resources:
+ if (!reload)
+ devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+ bool reload)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (mlxsw_core->reload_fail)
+ goto reload_fail;
+
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (reload)
+ return;
+reload_fail:
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (!driver->kvd_sizes_get)
+ return -EINVAL;
+
+ return driver->kvd_sizes_get(mlxsw_core, profile,
+ p_single_size, p_double_size,
+ p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6e966af72fc4..5ddafd74dc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+ void *bus_priv, bool reload,
+ struct devlink *devlink);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
u8 local_port;
@@ -308,10 +309,20 @@ struct mlxsw_driver {
u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
+ int (*resources_register)(struct mlxsw_core *mlxsw_core);
+ int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
};
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -332,6 +343,7 @@ struct mlxsw_bus {
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res);
void (*fini)(void *bus_priv);
+ void (*reset)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
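
The two new driver hooks exist so devlink resources can survive a reload: resources_register runs once at first registration (skipped on reload), and the KVD split is computed from the registered resources instead of the static profile. A sketch of a driver wiring them up; the sizes are placeholders, while spectrum derives them from its devlink resources:

#include "core.h"

static int my_resources_register(struct mlxsw_core *mlxsw_core)
{
	/* devlink_resource_register() calls would go here */
	return 0;
}

static int my_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			    const struct mlxsw_config_profile *profile,
			    u64 *p_single_size, u64 *p_double_size,
			    u64 *p_linear_size)
{
	/* placeholder split, must satisfy the device minimums */
	*p_linear_size = profile->kvd_linear_size;
	*p_double_size = 32768;
	*p_single_size = 65536;
	return 0;
}

static struct mlxsw_driver my_driver = {
	/* .kind, .init, .fini, ... */
	.resources_register	= my_resources_register,
	.kvd_sizes_get		= my_kvd_sizes_get,
};
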
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 6a979a09ab72..b698fb481b2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -310,9 +310,33 @@ struct mlxsw_afa_block {
struct mlxsw_afa_set *first_set;
struct mlxsw_afa_set *cur_set;
unsigned int cur_act_index; /* In current set. */
- struct list_head fwd_entry_ref_list;
+ struct list_head resource_list; /* List of resources held by actions
+ * in this block.
+ */
};
+struct mlxsw_afa_resource {
+ struct list_head list;
+ void (*destructor)(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource);
+};
+
+static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ list_add(&resource->list, &block->resource_list);
+}
+
+static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_resource *resource, *tmp;
+
+ list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
+ list_del(&resource->list);
+ resource->destructor(block, resource);
+ }
+}
+
struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
struct mlxsw_afa_block *block;
@@ -320,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
- INIT_LIST_HEAD(&block->fwd_entry_ref_list);
+ INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
/* At least one action set is always present, so just create it here */
@@ -336,8 +360,6 @@ err_first_set_create:
}
EXPORT_SYMBOL(mlxsw_afa_block_create);
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
-
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
struct mlxsw_afa_set *set = block->first_set;
@@ -348,7 +370,7 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
mlxsw_afa_set_put(block->afa, set);
set = next_set;
} while (set);
- mlxsw_afa_fwd_entry_refs_destroy(block);
+ mlxsw_afa_resources_destroy(block);
kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);
@@ -489,10 +511,29 @@ static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
}
struct mlxsw_afa_fwd_entry_ref {
- struct list_head list;
+ struct mlxsw_afa_resource resource;
struct mlxsw_afa_fwd_entry *fwd_entry;
};
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+ mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+ kfree(fwd_entry_ref);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+
+ fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
+ resource);
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
@@ -509,7 +550,8 @@ mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
goto err_fwd_entry_get;
}
fwd_entry_ref->fwd_entry = fwd_entry;
- list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
+ fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
+ mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
return fwd_entry_ref;
err_fwd_entry_get:
@@ -517,23 +559,51 @@ err_fwd_entry_get:
return ERR_PTR(err);
}
+struct mlxsw_afa_counter {
+ struct mlxsw_afa_resource resource;
+ u32 counter_index;
+};
+
static void
-mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_counter *counter)
{
- list_del(&fwd_entry_ref->list);
- mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
- kfree(fwd_entry_ref);
+ block->afa->ops->counter_index_put(block->afa->ops_priv,
+ counter->counter_index);
+ kfree(counter);
+}
+
+static void
+mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_counter *counter;
+
+ counter = container_of(resource, struct mlxsw_afa_counter, resource);
+ mlxsw_afa_counter_destroy(block, counter);
}
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
+static struct mlxsw_afa_counter *
+mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
- struct mlxsw_afa_fwd_entry_ref *tmp;
+ struct mlxsw_afa_counter *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->counter_index_get(block->afa->ops_priv,
+ &counter->counter_index);
+ if (err)
+ goto err_counter_index_get;
+ counter->resource.destructor = mlxsw_afa_counter_destructor;
+ mlxsw_afa_resource_add(block, &counter->resource);
+ return counter;
- list_for_each_entry_safe(fwd_entry_ref, tmp,
- &block->fwd_entry_ref_list, list)
- mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+err_counter_index_get:
+ kfree(counter);
+ return ERR_PTR(err);
}
#define MLXSW_AFA_ONE_ACTION_LEN 32
@@ -690,6 +760,16 @@ MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
*/
MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+/* afa_trapdisc_mirror_agent
+ * Mirror agent.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);
+
+/* afa_trapdisc_mirror_enable
+ * Mirror enable.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
+
static inline void
mlxsw_afa_trapdisc_pack(char *payload,
enum mlxsw_afa_trapdisc_trap_action trap_action,
@@ -701,6 +781,14 @@ mlxsw_afa_trapdisc_pack(char *payload,
mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}
+static inline void
+mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
+ u8 mirror_agent)
+{
+ mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
+ mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
+}
+
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
{
char *act = mlxsw_afa_block_append_action(block,
@@ -746,6 +834,104 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
+struct mlxsw_afa_mirror {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u8 local_in_port;
+ u8 local_out_port;
+ bool ingress;
+};
+
+static void
+mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_mirror *mirror)
+{
+ block->afa->ops->mirror_del(block->afa->ops_priv,
+ mirror->local_in_port,
+ mirror->local_out_port,
+ mirror->ingress);
+ kfree(mirror);
+}
+
+static void
+mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_mirror *mirror;
+
+ mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
+ mlxsw_afa_mirror_destroy(block, mirror);
+}
+
+static struct mlxsw_afa_mirror *
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
+ if (!mirror)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->mirror_add(block->afa->ops_priv,
+ local_in_port, local_out_port,
+ ingress, &mirror->span_id);
+ if (err)
+ goto err_mirror_add;
+
+ mirror->ingress = ingress;
+ mirror->local_out_port = local_out_port;
+ mirror->local_in_port = local_in_port;
+ mirror->resource.destructor = mlxsw_afa_mirror_destructor;
+ mlxsw_afa_resource_add(block, &mirror->resource);
+ return mirror;
+
+err_mirror_add:
+ kfree(mirror);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
+ u8 mirror_agent)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
+ mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
+ return 0;
+}
+
+int
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port, bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+ ingress);
+ if (IS_ERR(mirror))
+ return PTR_ERR(mirror);
+
+ err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
+ if (err)
+ goto err_append_allocated_mirror;
+
+ return 0;
+
+err_append_allocated_mirror:
+ mlxsw_afa_mirror_destroy(block, mirror);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
@@ -853,11 +1039,10 @@ mlxsw_afa_polcnt_pack(char *payload,
mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}
-int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index)
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_POLCNT_CODE,
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
if (!act)
return -ENOBUFS;
@@ -865,6 +1050,32 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
counter_index);
return 0;
}
+EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 *p_counter_index)
+{
+ struct mlxsw_afa_counter *counter;
+ u32 counter_index;
+ int err;
+
+ counter = mlxsw_afa_counter_create(block);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+ counter_index = counter->counter_index;
+
+ err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
+ if (err)
+ goto err_append_allocated_counter;
+
+ if (p_counter_index)
+ *p_counter_index = counter_index;
+ return 0;
+
+err_append_allocated_counter:
+ mlxsw_afa_counter_destroy(block, counter);
+ return err;
+}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
/* Virtual Router and Forwarding Domain Action
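
With every allocation now carried on the block's resource list, error unwinding collapses to a single destroy call. A sketch of a caller under the new counter signature; my_build_count_block() is hypothetical:

#include <linux/err.h>
#include "core_acl_flex_actions.h"

static struct mlxsw_afa_block *
my_build_count_block(struct mlxsw_afa *afa, u32 *p_counter_index)
{
	struct mlxsw_afa_block *block;
	int err;

	block = mlxsw_afa_block_create(afa);
	if (!block)
		return ERR_PTR(-ENOMEM);

	/* allocates a counter and appends a POLCNT action for it */
	err = mlxsw_afa_block_append_counter(block, p_counter_index);
	if (err)
		goto err_append;

	return block;

err_append:
	/* one call also frees the counter via its resource destructor */
	mlxsw_afa_block_destroy(block);
	return ERR_PTR(err);
}
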
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index a8d3314c3a24..43132293475c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -46,6 +46,12 @@ struct mlxsw_afa_ops {
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+ int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
+ void (*counter_index_put)(void *priv, unsigned int counter_index);
+ int (*mirror_add)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id);
+ void (*mirror_del)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress);
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -63,12 +69,17 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
+int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index);
+ u32 *p_counter_index);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
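
A sketch of the ops a driver supplies to back the new counter and mirror actions. The bodies below are stubs; spectrum's real implementations allocate from its flow-counter pool and bind SPAN agents:

#include "core_acl_flex_actions.h"

static int my_counter_index_get(void *priv, unsigned int *p_counter_index)
{
	*p_counter_index = 0;	/* allocate from the driver's counter pool */
	return 0;
}

static void my_counter_index_put(void *priv, unsigned int counter_index)
{
	/* return the counter to the pool */
}

static int my_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
			 bool ingress, int *p_span_id)
{
	*p_span_id = 0;		/* bind a SPAN agent for the port pair */
	return 0;
}

static void my_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
			  bool ingress)
{
	/* unbind the SPAN agent */
}

static const struct mlxsw_afa_ops my_afa_ops = {
	/* kvdl ops omitted */
	.counter_index_get	= my_counter_index_get,
	.counter_index_put	= my_counter_index_put,
	.mirror_add		= my_mirror_add,
	.mirror_del		= my_mirror_del,
};
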
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index c0dcfa05b077..25f9915ebd82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->dev = &client->dev;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
- &mlxsw_i2c_bus, mlxsw_i2c);
+ &mlxsw_i2c_bus, mlxsw_i2c, false,
+ NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
@@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
- mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+ mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
mutex_destroy(&mlxsw_i2c->cmd.lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 28427f0758c7..31c886edc791 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -42,7 +42,7 @@
struct mlxsw_item {
unsigned short offset; /* bytes in container */
- unsigned short step; /* step in bytes for indexed items */
+ short step; /* step in bytes for indexed items */
unsigned short in_step_offset; /* offset within one step */
unsigned char shift; /* shift in bits */
unsigned char element_size; /* size of element in bit array */
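
The step becomes signed so indexed items can walk backwards from the end of a register payload; the RDPM items added later in this patch rely on that by passing a negative step. A sketch with hypothetical EXAMPLE names:

/* Sketch only: records laid out from the end of the payload toward
 * the start, hence the negative per-record step.
 */
MLXSW_ITEM8_INDEXED(reg, example, rec_prio, MLXSW_REG_EXAMPLE_LAST_ENTRY,
		    0, 4, -MLXSW_REG_EXAMPLE_REC_LEN, 0x00, false);
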
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 6ef20e5cc77d..85faa87bf42d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -154,6 +154,7 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
+ const struct pci_device_id *id;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
static int
-mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
{
- u32 single_size, double_size, linear_size;
-
- if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
- !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
- !profile->used_kvd_split_data)
- return -EIO;
-
- linear_size = profile->kvd_linear_size;
+ u64 single_size, double_size, linear_size;
+ int err;
- /* The hash part is what left of the kvd without the
- * linear part. It is split to the single size and
- * double size by the parts ratio from the profile.
- * Both sizes must be a multiplications of the
- * granularity from the profile.
- */
- double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
- double_size *= profile->kvd_hash_double_parts;
- double_size /= profile->kvd_hash_double_parts +
- profile->kvd_hash_single_parts;
- double_size /= profile->kvd_hash_granularity;
- double_size *= profile->kvd_hash_granularity;
- single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
- linear_size;
-
- /* Check results are legal. */
- if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
- double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
- MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
- return -EIO;
+ err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+ &single_size, &double_size,
+ &linear_size);
+ if (err)
+ return err;
MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->adaptive_routing_group_cap);
}
if (MLXSW_RES_VALID(res, KVD_SIZE)) {
- err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
+ err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
-static const struct mlxsw_bus mlxsw_pci_bus = {
- .kind = "pci",
- .init = mlxsw_pci_init,
- .fini = mlxsw_pci_fini,
- .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
- .skb_transmit = mlxsw_pci_skb_transmit,
- .cmd_exec = mlxsw_pci_cmd_exec,
- .features = MLXSW_BUS_F_TXRX,
-};
-
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
{
@@ -1660,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0)
+ dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+ return err;
+}
+
+static void mlxsw_pci_reset(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+ mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+ .features = MLXSW_BUS_F_TXRX,
+ .reset = mlxsw_pci_reset,
+};
+
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@@ -1721,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
- err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
@@ -1730,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
+ mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
- &mlxsw_pci_bus, mlxsw_pci);
+ &mlxsw_pci_bus, mlxsw_pci, false,
+ NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
@@ -1741,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@@ -1760,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
{
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
- mlxsw_core_bus_device_unregister(mlxsw_pci->core);
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6c4e08b8058a..0e08be41c8e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4827,6 +4827,42 @@ static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
}
+/* RDPM - Router DSCP to Priority Mapping
+ * --------------------------------------
+ * Controls the mapping from DSCP field to switch priority on routed packets
+ */
+#define MLXSW_REG_RDPM_ID 0x8009
+#define MLXSW_REG_RDPM_BASE_LEN 0x00
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_RDPM_LEN 0x40
+#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \
+ MLXSW_REG_RDPM_LEN - \
+ MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)
+
+MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN);
+
+/* reg_dscp_entry_e
+ * Enable update of the specific entry
+ * Access: Index
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_dscp_entry_prio
+ * Switch Priority
+ * Access: RW
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index,
+ u8 prio)
+{
+ mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1);
+ mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio);
+}
+
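The negative DSCP_ENTRY_REC_LEN step in the two MLXSW_ITEM8_INDEXED definitions above means DSCP index 0 occupies the last byte of the register and higher indexes walk backwards toward the base. A minimal userspace sketch of that addressing, assuming the layout implied by the item definitions (the names and packing helper here are illustrative, not the driver's):

/* Illustrative sketch, not kernel code: packs RDPM entries the way the
 * negative-step item definitions above imply. Index 0 sits in the last
 * byte of the payload; higher indexes walk backwards. */
#include <stdint.h>
#include <stdio.h>

#define RDPM_LEN        0x40
#define RDPM_REC_LEN    0x01
#define RDPM_LAST_ENTRY (RDPM_LEN - RDPM_REC_LEN)      /* 0x3f */

static void rdpm_pack(uint8_t *payload, unsigned int index, uint8_t prio)
{
	unsigned int off = RDPM_LAST_ENTRY - index * RDPM_REC_LEN;

	payload[off] = (1u << 7) | (prio & 0x0f);       /* e=1, 4-bit prio */
}

int main(void)
{
	uint8_t payload[RDPM_LEN] = {0};

	rdpm_pack(payload, 0, 3);       /* DSCP entry 0 -> byte 0x3f */
	rdpm_pack(payload, 63, 7);      /* DSCP entry 63 -> byte 0x00 */
	printf("byte 0x3f = 0x%02x, byte 0x00 = 0x%02x\n",
	       payload[0x3f], payload[0x00]);
	return 0;
}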
/* RICNT - Router Interface Counter Register
* -----------------------------------------
* The RICNT register retrieves per port performance counters
@@ -7640,6 +7676,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
MLXSW_REG(rrcr),
MLXSW_REG(ralta),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3837ca7a705..3dcc58d61506 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -76,12 +76,7 @@
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152
-
-static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
- .major = MLXSW_FWREV_MAJOR,
- .minor = MLXSW_FWREV_MINOR,
- .subminor = MLXSW_FWREV_SUBMINOR
-};
+#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
@@ -339,28 +334,25 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}
-static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
- const struct mlxsw_fw_rev *b)
-{
- if (a->major != b->major)
- return a->major > b->major;
- if (a->minor != b->minor)
- return a->minor > b->minor;
- return a->subminor >= b->subminor;
-}
-
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct firmware *firmware;
int err;
- if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ /* Validate driver & FW are compatible */
+ if (rev->major != MLXSW_FWREV_MAJOR) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, MLXSW_FWREV_MAJOR);
+ return -EINVAL;
+ }
+ if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
MLXSW_SP_FW_FILENAME);
err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
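The rewritten check above drops the exact version comparison: the major number must match the driver exactly, while minors are considered compatible whenever they share a hundreds "branch". A standalone sketch of just that rule (the version numbers below are illustrative):

/* Illustrative sketch of the compatibility rule above: same major, and
 * the minors must share a branch (minor / 100). */
#include <stdbool.h>
#include <stdio.h>

#define MINOR_TO_BRANCH(minor) ((minor) / 100)

static bool fw_compatible(int drv_major, int drv_minor,
			  int fw_major, int fw_minor)
{
	if (fw_major != drv_major)
		return false;
	return MINOR_TO_BRANCH(fw_minor) == MINOR_TO_BRANCH(drv_minor);
}

int main(void)
{
	/* Driver expects 13.1530.x, so any 13.15xx firmware is accepted. */
	printf("13.1530 vs 13.1560: %d\n", fw_compatible(13, 1530, 13, 1560));
	printf("13.1530 vs 13.1420: %d\n", fw_compatible(13, 1530, 13, 1420));
	return 0;
}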
@@ -576,7 +568,7 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-static struct mlxsw_sp_span_entry *
+struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
int i;
@@ -677,13 +669,28 @@ mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char mpar_pl[MLXSW_REG_MPAR_LEN];
- char sbib_pl[MLXSW_REG_SBIB_LEN];
int pa_id = span_entry->id;
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
int err;
/* if it is an egress SPAN, bind a shared buffer to it */
@@ -699,12 +706,12 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
}
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
- if (err)
- goto err_mpar_reg_write;
+ if (bind) {
+ err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ true);
+ if (err)
+ goto err_port_bind;
+ }
inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
if (!inspected_port) {
@@ -717,8 +724,11 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
return 0;
-err_mpar_reg_write:
err_inspected_port_alloc:
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+err_port_bind:
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
@@ -727,25 +737,22 @@ err_inspected_port_alloc:
}
static void
-mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
char sbib_pl[MLXSW_REG_SBIB_LEN];
- int pa_id = span_entry->id;
inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
if (!inspected_port)
return;
- /* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
/* remove the SBIB buffer if it was egress SPAN */
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
@@ -758,9 +765,9 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
kfree(inspected_port);
}
-static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type)
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
struct mlxsw_sp_span_entry *span_entry;
@@ -773,7 +780,7 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
span_entry->id);
- err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
+ err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
if (err)
goto err_port_bind;
@@ -784,9 +791,8 @@ err_port_bind:
return err;
}
-static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
- u8 destination_port,
- enum mlxsw_sp_span_type type)
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -799,7 +805,7 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
span_entry->id);
- mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
+ mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1571,14 +1577,11 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
const struct tc_action *a,
bool ingress)
{
- struct net *net = dev_net(mlxsw_sp_port->dev);
enum mlxsw_sp_span_type span_type;
struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
- int ifindex;
- ifindex = tcf_mirred_ifindex(a);
- to_dev = __dev_get_by_index(net, ifindex);
+ to_dev = tcf_mirred_dev(a);
if (!to_dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
@@ -1593,7 +1596,8 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
mirror->to_local_port = to_port->local_port;
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
+ true);
}
static void
@@ -1604,8 +1608,8 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
- span_type);
+ mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+ span_type, true);
}
static int
@@ -1734,9 +1738,6 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
- if (f->common.chain_index)
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
@@ -1750,72 +1751,187 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_flower_offload *f,
- bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+ struct tc_cls_flower_offload *f)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
case TC_CLSFLOWER_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+ mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
return 0;
case TC_CLSFLOWER_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
- if (!tc_can_offload(mlxsw_sp_port->dev))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSMATCHALL:
+ if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
+ type_data))
+ return -EOPNOTSUPP;
+
return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
ingress);
case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
- ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, true);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return 0;
+ case TC_SETUP_CLSFLOWER:
+ if (mlxsw_sp_acl_block_disabled(acl_block))
+ return -EOPNOTSUPP;
+
+ return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb) {
+ acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+ if (!acl_block)
+ return -ENOMEM;
+ block_cb = __tcf_block_cb_register(block,
+ mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp, acl_block);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ } else {
+ acl_block = tcf_block_cb_priv(block_cb);
+ }
+ tcf_block_cb_incref(block_cb);
+ err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = acl_block;
+ else
+ mlxsw_sp_port->eg_acl_block = acl_block;
+
+ return 0;
+
+err_block_bind:
+ if (!tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+err_cb_register:
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
+ return err;
}
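The flower bind path above shares one ACL block between binders: it looks up an existing callback registration, creates the block private only on first use, and refcounts every additional binder so the block is destroyed only when the last one leaves. A standalone sketch of that create-or-lookup idiom (the names and the single global standing in for tcf_block_cb_lookup() are invented):

/* Illustrative sketch of the shared-block pattern above: create on first
 * use, refcount later binders, free when the last reference drops. */
#include <stdio.h>
#include <stdlib.h>

struct acl_block {
	int refcount;
};

static struct acl_block *registered;	/* stand-in for the cb lookup */

static struct acl_block *block_get(void)
{
	if (!registered) {
		registered = calloc(1, sizeof(*registered));
		if (!registered)
			return NULL;
	}
	registered->refcount++;
	return registered;
}

static void block_put(struct acl_block *block)
{
	if (--block->refcount == 0) {
		free(block);
		registered = NULL;
	}
}

int main(void)
{
	struct acl_block *a = block_get();	/* ingress binder */
	struct acl_block *b = block_get();	/* egress binder shares it */

	printf("shared=%d refcount=%d\n", a == b, a->refcount);
	block_put(b);
	block_put(a);
	return 0;
}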
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = NULL;
+ else
+ mlxsw_sp_port->eg_acl_block = NULL;
+
+ acl_block = tcf_block_cb_priv(block_cb);
+ err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_block_offload *f)
{
tc_setup_cb_t *cb;
+ bool ingress;
+ int err;
- if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_ig;
- else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_eg;
- else
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+ ingress = true;
+ } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+ ingress = false;
+ } else {
return -EOPNOTSUPP;
+ }
switch (f->command) {
case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+ mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+ f->block, ingress);
+ if (err) {
+ tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ return err;
+ }
+ return 0;
case TC_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+ f->block, ingress);
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return 0;
default:
@@ -1833,11 +1949,69 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_RED:
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_PRIO:
+ return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
}
+
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ if (!enable) {
+ if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+ mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+ !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ } else {
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+ }
+ return 0;
+}
+
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlxsw_sp_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ dev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(dev, enable);
+ if (err) {
+ netdev_err(dev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+ if (enable)
+ dev->features |= feature;
+ else
+ dev->features &= ~feature;
+
+ return 0;
+}
+
+static int mlxsw_sp_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ mlxsw_sp_feature_hw_tc);
+}
+
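mlxsw_sp_handle_feature() is a generic toggle: it XORs the wanted feature set against the current one, acts only when the requested bit actually flips, and commits the bit only after the handler succeeds. A standalone sketch of the same pattern, with types and names simplified:

/* Illustrative sketch of the feature-toggle pattern above: act only on
 * bits that change, commit the bit only if the handler succeeds. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;
typedef int (*feature_handler)(bool enable);

static int handle_feature(features_t *current, features_t wanted,
			  features_t feature, feature_handler handler)
{
	features_t changes = wanted ^ *current;
	bool enable = wanted & feature;
	int err;

	if (!(changes & feature))
		return 0;		/* this bit is not changing */

	err = handler(enable);
	if (err)
		return err;		/* leave *current untouched */

	if (enable)
		*current |= feature;
	else
		*current &= ~feature;
	return 0;
}

static int hw_tc_handler(bool enable)
{
	printf("%s hw-tc offload\n", enable ? "enable" : "disable");
	return 0;
}

int main(void)
{
	features_t features = 0;
	const features_t F_HW_TC = 1u << 0;

	handle_feature(&features, F_HW_TC, F_HW_TC, hw_tc_handler);
	printf("features = %#llx\n", (unsigned long long)features);
	return 0;
}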
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -1852,6 +2026,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
+ .ndo_set_features = mlxsw_sp_set_features,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3039,6 +3214,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_fids_init;
}
+ err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_qdiscs_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3067,6 +3249,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3102,6 +3286,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -3933,6 +4118,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
+static bool
+mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
+ u64 size)
+{
+ const struct mlxsw_config_profile *profile;
+
+ profile = &mlxsw_sp_config_profile;
+ if (size % profile->kvd_hash_granularity) {
+ NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
+ return false;
+ }
+ return true;
+}
+
+static int
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
+}
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+ .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
+static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+{
+ u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_SINGLE_MIN_SIZE);
+ u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_DOUBLE_MIN_SIZE);
+ u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ u32 linear_size_min = 0;
+
+ /* KVD top resource */
+ mlxsw_sp_kvd_size_params.size_min = kvd_size;
+ mlxsw_sp_kvd_size_params.size_max = kvd_size;
+ mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Linear part init */
+ mlxsw_sp_linear_size_params.size_min = linear_size_min;
+ mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
+ double_size_min;
+ mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash double part init */
+ mlxsw_sp_hash_double_size_params.size_min = double_size_min;
+ mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash single part init */
+ mlxsw_sp_hash_single_size_params.size_min = single_size_min;
+ mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+}
+
+static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 kvd_size, single_size, double_size, linear_size;
+ const struct mlxsw_config_profile *profile;
+ int err;
+
+ profile = &mlxsw_sp_config_profile;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ true, kvd_size,
+ MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &mlxsw_sp_kvd_size_params,
+ &mlxsw_sp_resource_kvd_ops);
+ if (err)
+ return err;
+
+ linear_size = profile->kvd_linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ false, linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_linear_size_params,
+ &mlxsw_sp_resource_kvd_linear_ops);
+ if (err)
+ return err;
+
+ double_size = kvd_size - linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ double_size = rounddown(double_size, profile->kvd_hash_granularity);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ false, double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_double_size_params,
+ &mlxsw_sp_resource_kvd_hash_double_ops);
+ if (err)
+ return err;
+
+ single_size = kvd_size - double_size - linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ false, single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_single_size_params,
+ &mlxsw_sp_resource_kvd_hash_single_ops);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 double_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+	/* The hash part is what is left of the KVD without the
+	 * linear part. It is split into the single size and
+	 * double size according to the parts ratio from the
+	 * profile. Both sizes must be multiples of the
+	 * granularity from the profile. If the user provided
+	 * the sizes, they are obtained via devlink.
+	 */
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
+ if (err)
+ *p_linear_size = profile->kvd_linear_size;
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
+ if (err) {
+ double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ *p_double_size = rounddown(double_size,
+ profile->kvd_hash_granularity);
+ }
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
+ if (err)
+ *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_double_size - *p_linear_size;
+
+ /* Check results are legal. */
+ if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+ return -EIO;
+
+ return 0;
+}
+
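When devlink carries no user-set sizes, mlxsw_sp_kvd_sizes_get() above falls back to the profile defaults: the double hash part takes its ratio share of whatever remains after the linear part, rounded down to the granularity, and the single part takes the rest. A standalone sketch of that arithmetic (the total size and parts ratio below are made up; only the formula mirrors the code):

/* Illustrative sketch of the default KVD split above. Inputs are
 * invented; the arithmetic is the same: double takes its ratio share
 * of (kvd - linear), rounded down to the granularity, single gets the
 * remainder. */
#include <stdint.h>
#include <stdio.h>

#define GRANULARITY 128

static uint64_t rounddown_gran(uint64_t v)
{
	return v - (v % GRANULARITY);
}

int main(void)
{
	uint64_t kvd_size = 400000;	/* illustrative totals */
	uint64_t linear = 98304;
	unsigned int double_parts = 1, single_parts = 2;
	uint64_t dbl, single;

	dbl = (kvd_size - linear) * double_parts /
	      (double_parts + single_parts);
	dbl = rounddown_gran(dbl);
	single = kvd_size - dbl - linear;

	printf("linear=%llu double=%llu single=%llu\n",
	       (unsigned long long)linear, (unsigned long long)dbl,
	       (unsigned long long)single);
	return 0;
}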
static struct mlxsw_driver mlxsw_sp_driver = {
.kind = mlxsw_sp_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -3952,6 +4384,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp_resources_register,
+ .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 05ce1befd9b3..bdd8f94a452c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -66,6 +66,18 @@
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+
+enum mlxsw_sp_resource_id {
+ MLXSW_SP_RESOURCE_KVD,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+};
+
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
@@ -204,29 +216,6 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
-enum mlxsw_sp_qdisc_type {
- MLXSW_SP_QDISC_NO_QDISC,
- MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
- u32 handle;
- enum mlxsw_sp_qdisc_type type;
- struct red_stats xstats_base;
- union {
- struct {
- u64 tail_drop_base;
- u64 ecn_base;
- u64 wred_drop_base;
- } red;
- } xstats;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 drops;
- u64 overlimits;
-};
-
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
@@ -269,7 +258,10 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
- struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc;
+ unsigned acl_rule_count;
+ struct mlxsw_sp_acl_block *ing_acl_block;
+ struct mlxsw_sp_acl_block *eg_acl_block;
};
static inline bool
@@ -404,6 +396,16 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
+ u8 destination_port,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -458,13 +460,13 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
unsigned int counter_index;
- bool counter_valid;
};
enum mlxsw_sp_acl_profile {
@@ -477,8 +479,11 @@ struct mlxsw_sp_acl_profile_ops {
void *priv, void *ruleset_priv);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct net_device *dev, bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -498,17 +503,34 @@ struct mlxsw_sp_acl_ops {
enum mlxsw_sp_acl_profile profile);
};
+struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
@@ -532,6 +554,10 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
@@ -575,16 +601,23 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
/* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 93dcd315f7d6..0897a5435cc2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
#include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_ruleset_ht_key {
- struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+ struct list_head binding_list;
+ struct mlxsw_sp_acl_ruleset *ruleset_zero;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int rule_count;
+ unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+ struct mlxsw_sp_acl_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+ return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+
+ return err;
+}
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net)
+{
+ struct mlxsw_sp_acl_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ block->mlxsw_sp = mlxsw_sp;
+ return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+ return 0;
+}
+
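mlxsw_sp_acl_ruleset_block_bind() above is an all-or-nothing loop: if binding fails partway down the list, list_for_each_entry_continue_reverse() unwinds exactly the bindings that already succeeded. An array-based sketch of the same rollback idiom (the port count and failure point are invented):

/* Illustrative sketch of the all-or-nothing bind pattern above: on a
 * mid-list failure, unwind only the bindings that succeeded, in
 * reverse order. */
#include <stdio.h>

#define N_BINDINGS 3

static int bind_one(int i)
{
	if (i == 2)
		return -1;	/* simulate a failure on the third port */
	printf("bound %d\n", i);
	return 0;
}

static void unbind_one(int i)
{
	printf("unbound %d\n", i);
}

static int bind_all(void)
{
	int i, err;

	for (i = 0; i < N_BINDINGS; i++) {
		err = bind_one(i);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	while (--i >= 0)	/* reverse of what succeeded so far */
		unbind_one(i);
	return err;
}

int main(void)
{
	return bind_all() ? 1 : 0;
}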
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->ref_count = 1;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,68 +336,50 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ops_ruleset_add;
- return ruleset;
-
-err_ops_ruleset_add:
- rhashtable_destroy(&ruleset->rule_ht);
-err_rhashtable_init:
- kfree(ruleset);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-
- ops->ruleset_del(mlxsw_sp, ruleset->priv);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
-}
-
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset,
- struct net_device *dev, bool ingress,
- u32 chain_index)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- int err;
-
- ruleset->ht_key.dev = dev;
- ruleset->ht_key.ingress = ingress;
- ruleset->ht_key.chain_index = chain_index;
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
if (err)
- return err;
- if (!ruleset->ht_key.chain_index) {
+ goto err_ht_insert;
+
+ if (!chain_index) {
/* We only need the ruleset with chain index 0, the implicit one,
 * to be directly bound to the device. The rest of the rulesets
 * are bound by the "Goto action set".
*/
- err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
- goto err_ops_ruleset_bind;
+ goto err_ruleset_bind;
}
- return 0;
-err_ops_ruleset_bind:
+ return ruleset;
+
+err_ruleset_bind:
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
- return err;
+err_ht_insert:
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+err_ops_ruleset_add:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
}
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ u32 chain_index = ruleset->ht_key.chain_index;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- if (!ruleset->ht_key.chain_index)
- ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+ if (!chain_index)
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
}
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
{
if (--ruleset->ref_count)
return;
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
- bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
- ht_key.dev = dev;
- ht_key.ingress = ingress;
+ ht_key.block = block;
ht_key.chain_index = chain_index;
ht_key.ops = ops;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (!ruleset)
return ERR_PTR(-ENOENT);
return ruleset;
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- int err;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (ruleset) {
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
- if (IS_ERR(ruleset))
- return ruleset;
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
- ingress, chain_index);
- if (err)
- goto err_ruleset_bind;
- return ruleset;
-
-err_ruleset_bind:
- mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
- return ERR_PTR(err);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -302,27 +462,6 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
-static int
-mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- int err;
-
- err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
- if (err)
- return err;
- rulei->counter_valid = true;
- return 0;
-}
-
-static void
-mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- rulei->counter_valid = false;
- mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
-}
-
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
@@ -427,6 +566,34 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
local_port, in_port);
}
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_port *out_port;
+ struct mlxsw_sp_port *in_port;
+
+ if (!list_is_singular(&block->binding_list))
+ return -EOPNOTSUPP;
+
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_acl_block_binding, list);
+ in_port = binding->mlxsw_sp_port;
+ if (!mlxsw_sp_port_dev_check(out_dev))
+ return -EINVAL;
+
+ out_port = netdev_priv(out_dev);
+ if (out_port->mlxsw_sp != mlxsw_sp)
+ return -EINVAL;
+
+ return mlxsw_afa_block_append_mirror(rulei->act_block,
+ in_port->local_port,
+ out_port->local_port,
+ binding->ingress);
+}
+
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 action, u16 vid, u16 proto, u8 prio)
@@ -459,7 +626,7 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
- rulei->counter_index);
+ &rulei->counter_index);
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -493,13 +660,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
goto err_rulei_create;
}
- err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
- if (err)
- goto err_counter_alloc;
return rule;
-err_counter_alloc:
- mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
@@ -512,7 +674,6 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
- mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -535,6 +696,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ ruleset->ht_key.block->rule_count++;
return 0;
err_rhashtable_insert:
@@ -548,6 +710,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 4d3340ed0291..6ca6894125f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -108,11 +108,77 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
+static int
+mlxsw_sp_act_counter_index_get(void *priv, unsigned int *p_counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_flow_counter_alloc(mlxsw_sp, p_counter_index);
+}
+
+static void
+mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
+}
+
+static int
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id)
+{
+ struct mlxsw_sp_port *in_port, *out_port;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ enum mlxsw_sp_span_type type;
+ int err;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ out_port = mlxsw_sp->ports[local_out_port];
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
+ if (err)
+ return err;
+
+ span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
+ if (!span_entry) {
+ err = -ENOENT;
+ goto err_span_entry_find;
+ }
+
+ *p_span_id = span_entry->id;
+ return 0;
+
+err_span_entry_find:
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+ return err;
+}
+
+static void
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *in_port;
+ enum mlxsw_sp_span_type type;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+}
+
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
.kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
};
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7e8284b46968..c6e180c2be1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
struct list_head region_list;
unsigned int region_count;
struct rhashtable chunk_ht;
- struct {
- u16 local_port;
- bool ingress;
- } bound;
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- struct mlxsw_sp_port *mlxsw_sp_port;
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- if (!mlxsw_sp_port_dev_check(dev))
- return -EINVAL;
-
- mlxsw_sp_port = netdev_priv(dev);
- group->bound.local_port = mlxsw_sp_port->local_port;
- group->bound.ingress = ingress;
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
group->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group)
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
group->id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
- dev, ingress);
+ mlxsw_sp_port, ingress);
}
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv)
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+ mlxsw_sp_port, ingress);
}
static u16
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 96fdba78acab..f56fa18d6b26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.size_get = mlxsw_sp_dpipe_table_host4_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ return err;
}
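/* Editor's note on the resource linkage introduced above (not part of the
 * patch): devlink_dpipe_table_resource_set() ties a dpipe table to a devlink
 * resource and records how many resource units a single table entry consumes.
 * A host4 entry costs one KVD hash-single unit here, and a host6 entry below
 * costs two KVD hash-double units, which lets devlink translate resource
 * occupancy into table capacity.
 */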
static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.size_get = mlxsw_sp_dpipe_table_host6_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ return err;
}
static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.size_get = mlxsw_sp_dpipe_table_adj_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ return err;
}
static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f0e57857ea4..6ce00e28d4ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tcf_exts *exts)
{
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
- ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
- ingress,
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -92,7 +92,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
u16 fid_index;
@@ -104,14 +103,18 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- out_dev = __dev_get_by_index(dev_net(dev), ifindex);
- if (out_dev == dev)
- out_dev = NULL;
-
+ out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
out_dev);
if (err)
return err;
+ } else if (is_tcf_mirred_egress_mirror(a)) {
+ struct net_device *out_dev = tcf_mirred_dev(a);
+
+ err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
+ block, out_dev);
+ if (err)
+ return err;
} else if (is_tcf_vlan(a)) {
u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
u32 action = tcf_vlan_action(a);
@@ -266,7 +269,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
@@ -384,21 +387,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
- rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
}
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_acl_rule_info *rulei;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -411,7 +412,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
}
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
if (err)
goto err_flower_parse;
@@ -435,15 +436,15 @@ err_rule_create:
return err;
}
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return;
@@ -457,10 +458,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
u64 packets;
@@ -468,8 +469,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
u64 bytes;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 310c38247b5c..55f9d2d70f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
}
+static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
+{
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (part->info->end_index -
+ part->info->start_index + 1) /
+ part->info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += part->info->alloc_size;
+ return occ;
+}
+
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl_part *part;
+ u64 occ = 0;
+
+ list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
+ occ += mlxsw_sp_kvdl_part_occ(part);
+
+ return occ;
+}
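/* Editor's sketch of the occupancy math above: part->usage is a bitmap with
 * one bit per allocation chunk, and each set bit accounts for alloc_size
 * entries. E.g. a part spanning indices 0..1023 with alloc_size 32 has
 * 1024 / 32 = 32 chunk bits; three set bits give an occupancy of 96 entries.
 * mlxsw_sp_kvdl_occ_get() then sums all parts for the devlink resource.
 */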
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 34a0b632e5dd..4c7f32d4288d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -243,7 +243,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
if (!afa_block)
return ERR_PTR(-ENOMEM);
- err = mlxsw_afa_block_append_counter(afa_block, counter_index);
+ err = mlxsw_afa_block_append_allocated_counter(afa_block,
+ counter_index);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index b5397da94d7f..0b7670459051 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,6 +41,150 @@
#include "spectrum.h"
#include "reg.h"
+#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
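/* Editor's note: with IEEE_8021QAZ_MAX_TCS == 8, band 0 (the highest-priority
 * PRIO band) maps to tclass 7 and band 7 to tclass 0, i.e. the bands are laid
 * out in reverse order over the hardware traffic classes.
 */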
+
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+ MLXSW_SP_QDISC_PRIO,
+};
+
+struct mlxsw_sp_qdisc_ops {
+ enum mlxsw_sp_qdisc_type type;
+ int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params);
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr);
+ int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr);
+ void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ /* unoffload - to be used for a qdisc that stops being offloaded without
+ * being destroyed.
+ */
+ void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ u8 tclass_num;
+ union {
+ struct red_stats red;
+ } xstats_base;
+ struct mlxsw_sp_qdisc_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+ u64 backlog;
+ } stats_base;
+
+ struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+ enum mlxsw_sp_qdisc_type type)
+{
+ return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->type == type &&
+ mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int err = 0;
+
+ if (!mlxsw_sp_qdisc)
+ return 0;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+		/* If this location already holds a qdisc of the same type, we
+		 * can simply override the old qdisc's configuration. Otherwise,
+		 * the old qdisc needs to be removed before the new one is
+		 * set up.
+		 */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_bad_param;
+
+ err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_config;
+
+ if (mlxsw_sp_qdisc->handle != handle) {
+ mlxsw_sp_qdisc->ops = ops;
+ if (ops->clean_stats)
+ ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+ }
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+err_config:
+ if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+ ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ return err;
+}
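/* Editor's sketch of the replace flow above: a qdisc of a different type at
 * this location is destroyed first, then the new parameters are validated and
 * applied through the ops vtable. The statistics bases are reset only when
 * the handle changes, so re-configuring an existing handle keeps its
 * counters; on failure, ->unoffload gives an already-offloaded handle a
 * chance to fix up the software backlog before the offload is torn down.
 */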
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_stats)
+ return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ stats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_xstats)
+ return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ xstats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -80,80 +224,78 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
+ struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+ red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
- mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
- switch (mlxsw_sp_qdisc->type) {
- case MLXSW_SP_QDISC_RED:
- xstats_base->prob_mark = xstats->ecn;
- xstats_base->prob_drop = xstats->wred_drop[tclass_num];
- xstats_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->prob_mark = xstats->ecn;
+ red_base->prob_drop = xstats->wred_drop[tclass_num];
+ red_base->pdrop = xstats->tail_drop[tclass_num];
- mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
- xstats_base->prob_mark;
- mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
- xstats_base->pdrop;
- break;
- default:
- break;
- }
+ stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+ stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+ stats_base->backlog = 0;
}
static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int err;
-
- if (mlxsw_sp_qdisc->handle != handle)
- return 0;
-
- err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
- mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
- return err;
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+ mlxsw_sp_qdisc->tclass_num);
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u32 min, max;
- u64 prob;
- int err = 0;
+ struct tc_red_qopt_offload_params *p = params;
if (p->min > p->max) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: min %u is bigger then max %u\n", p->min,
p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->min == 0 || p->max == 0) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: 0 value is illegal for min and max\n");
- goto err_bad_param;
+ return -EINVAL;
}
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ u32 min, max;
+ u64 prob;
/* calculate probability in percentage */
prob = p->probability;
@@ -162,116 +304,309 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
prob = DIV_ROUND_UP(prob, 1 << 16);
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
- if (err)
- goto err_config;
-
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
- if (mlxsw_sp_qdisc->handle != handle)
- mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
- mlxsw_sp_qdisc,
- tclass_num);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
+}
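/* Editor's worked example, assuming p->probability is a fixed-point fraction
 * scaled by 2^16 as the DIV_ROUND_UP(prob, 1 << 16) above implies: a value of
 * 0x8000 stands for 0.5 and converts to 0x8000 * 100 / 2^16 = 50, i.e. the
 * 50% drop probability programmed into the hardware profile.
 */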
- mlxsw_sp_qdisc->handle = handle;
- return 0;
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+ u64 backlog;
-err_bad_param:
- err = -EINVAL;
-err_config:
- mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
- mlxsw_sp_qdisc, tclass_num);
- return err;
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
}
static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num, struct red_stats *res)
+ void *xstats_ptr)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
-
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
+ struct red_stats *res = xstats_ptr;
+ int early_drops, marks, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- res->prob_mark = xstats->ecn - xstats_base->prob_mark;
- res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->ecn - xstats_base->prob_mark;
+ pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+ res->pdrop += pdrops;
+ res->prob_drop += early_drops;
+ res->prob_mark += marks;
+
+ xstats_base->pdrop += pdrops;
+ xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_stats *res)
+ struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops;
+ u64 tx_bytes, tx_packets, overlimits, drops, backlog;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
-
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
- tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- mlxsw_sp_qdisc->overlimits;
+ stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- mlxsw_sp_qdisc->drops;
-
- _bstats_update(res->bstats, tx_bytes, tx_packets);
- res->qstats->overlimits += overlimits;
- res->qstats->drops += drops;
- res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- xstats->backlog[tclass_num]);
-
- mlxsw_sp_qdisc->drops += drops;
- mlxsw_sp_qdisc->overlimits += overlimits;
- mlxsw_sp_qdisc->tx_bytes += tx_bytes;
- mlxsw_sp_qdisc->tx_packets += tx_packets;
+ stats_base->drops;
+ backlog = xstats->backlog[tclass_num];
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->overlimits += overlimits;
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->overlimits += overlimits;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
return 0;
}
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+ .type = MLXSW_SP_QDISC_RED,
+ .check_params = mlxsw_sp_qdisc_red_check_params,
+ .replace = mlxsw_sp_qdisc_red_replace,
+ .unoffload = mlxsw_sp_qdisc_red_unoffload,
+ .destroy = mlxsw_sp_qdisc_red_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_red_stats,
+ .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass_num;
if (p->parent != TC_H_ROOT)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
- tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (p->command == TC_RED_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_red,
+ &p->set);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_RED))
+ return -EOPNOTSUPP;
switch (p->command) {
- case TC_RED_REPLACE:
- return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->set);
case TC_RED_DESTROY:
- return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num);
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_RED_XSTATS:
- return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- p->xstats);
+ return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->xstats);
case TC_RED_STATS:
- return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->stats);
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
default:
return -EOPNOTSUPP;
}
}
+
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ MLXSW_SP_PORT_DEFAULT_TCLASS);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+
+ if (p->bands > IEEE_8021QAZ_MAX_TCS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ int tclass, i;
+ int err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
+}
+
+static int
+mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ drops += xstats->tail_drop[i];
+ backlog += xstats->backlog[i];
+ }
+ drops = drops - stats_base->drops;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+ return 0;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
+
+ stats_base->drops = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ stats_base->drops += xstats->tail_drop[i];
+
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
+ .type = MLXSW_SP_QDISC_PRIO,
+ .check_params = mlxsw_sp_qdisc_prio_check_params,
+ .replace = mlxsw_sp_qdisc_prio_replace,
+ .unoffload = mlxsw_sp_qdisc_prio_unoffload,
+ .destroy = mlxsw_sp_qdisc_prio_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+ if (p->command == TC_PRIO_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_prio,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_PRIO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_PRIO_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_PRIO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->root_qdisc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->root_qdisc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7042c855a5d6..f0b25baba09a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -71,6 +71,7 @@
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
+struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
@@ -84,6 +85,8 @@ struct mlxsw_sp_router {
struct rhashtable nexthop_ht;
struct list_head nexthop_list;
struct {
+ /* One tree for each protocol: IPv4 and IPv6 */
+ struct mlxsw_sp_lpm_tree *proto_trees[2];
struct mlxsw_sp_lpm_tree *trees;
unsigned int tree_count;
} lpm;
@@ -162,6 +165,15 @@ struct mlxsw_sp_rif_ops {
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};
+static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
+static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree);
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib,
+ u8 tree_id);
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib);
+
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
@@ -349,14 +361,6 @@ mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
-static bool
-mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
- struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
-
- return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
-}
-
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
struct mlxsw_sp_prefix_usage *prefix_usage2)
@@ -398,7 +402,6 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
-struct mlxsw_sp_fib;
struct mlxsw_sp_fib_node {
struct list_head entry_list;
@@ -445,6 +448,7 @@ struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
unsigned int ref_count;
enum mlxsw_sp_l3proto proto;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -453,8 +457,6 @@ struct mlxsw_sp_fib {
struct list_head node_list;
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
- unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
- struct mlxsw_sp_prefix_usage prefix_usage;
enum mlxsw_sp_l3proto proto;
};
@@ -469,12 +471,15 @@ struct mlxsw_sp_vr {
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
return ERR_PTR(-ENOMEM);
@@ -484,17 +489,26 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
INIT_LIST_HEAD(&fib->node_list);
fib->proto = proto;
fib->vr = vr;
+ fib->lpm_tree = lpm_tree;
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
+ if (err)
+ goto err_lpm_tree_bind;
return fib;
+err_lpm_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
kfree(fib);
return ERR_PTR(err);
}
-static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib *fib)
{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
WARN_ON(!list_empty(&fib->node_list));
- WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
@@ -581,6 +595,9 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
sizeof(lpm_tree->prefix_usage));
+ memset(&lpm_tree->prefix_ref_count, 0,
+ sizeof(lpm_tree->prefix_ref_count));
+ lpm_tree->ref_count = 1;
return lpm_tree;
err_left_struct_set:
@@ -607,8 +624,10 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
- prefix_usage))
+ prefix_usage)) {
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
return lpm_tree;
+ }
}
return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
@@ -629,9 +648,10 @@ static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
struct mlxsw_sp_lpm_tree *lpm_tree;
u64 max_trees;
- int i;
+ int err, i;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
return -EIO;
@@ -649,11 +669,42 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv4_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv6_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
+
return 0;
+
+err_ipv6_tree_get:
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_ipv4_tree_get:
+ kfree(mlxsw_sp->router->lpm.trees);
+ return err;
}
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
kfree(mlxsw_sp->router->lpm.trees);
}
@@ -745,10 +796,10 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
- vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->fib4))
return ERR_CAST(vr->fib4);
- vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
+ vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(vr->fib6)) {
err = PTR_ERR(vr->fib6);
goto err_fib6_create;
@@ -763,21 +814,22 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
return vr;
err_mr_table_create:
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
err_fib6_create:
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
return ERR_PTR(err);
}
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
{
mlxsw_sp_mr_table_destroy(vr->mr4_table);
vr->mr4_table = NULL;
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
}
@@ -793,12 +845,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
return vr;
}
-static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
list_empty(&vr->fib6->node_list) &&
mlxsw_sp_mr_table_empty(vr->mr4_table))
- mlxsw_sp_vr_destroy(vr);
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
static bool
@@ -809,7 +861,7 @@ mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
if (!mlxsw_sp_vr_is_used(vr))
return false;
- if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
+ if (fib->lpm_tree->id == tree_id)
return true;
return false;
}
@@ -839,14 +891,13 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib,
struct mlxsw_sp_lpm_tree *new_tree)
{
- struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
enum mlxsw_sp_l3proto proto = fib->proto;
+ struct mlxsw_sp_lpm_tree *old_tree;
u8 old_id, new_id = new_tree->id;
struct mlxsw_sp_vr *vr;
int i, err;
- if (!old_tree)
- goto no_replace;
+ old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
old_id = old_tree->id;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
@@ -860,6 +911,11 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
goto err_tree_replace;
}
+ memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
+ sizeof(new_tree->prefix_ref_count));
+ mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
+
return 0;
err_tree_replace:
@@ -871,36 +927,6 @@ err_tree_replace:
old_tree);
}
return err;
-
-no_replace:
- fib->lpm_tree = new_tree;
- mlxsw_sp_lpm_tree_hold(new_tree);
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err) {
- mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
- fib->lpm_tree = NULL;
- return err;
- }
- return 0;
-}
-
-static void
-mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_l3proto proto,
- struct mlxsw_sp_prefix_usage *req_prefix_usage)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
- unsigned char prefix;
-
- if (!mlxsw_sp_vr_is_used(vr))
- continue;
- mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
- mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
- }
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -2630,7 +2656,8 @@ struct mlxsw_sp_nexthop_group_cmp_arg {
static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
- const struct in6_addr *gw, int ifindex)
+ const struct in6_addr *gw, int ifindex,
+ int weight)
{
int i;
@@ -2638,7 +2665,7 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
const struct mlxsw_sp_nexthop *nh;
nh = &nh_grp->nexthops[i];
- if (nh->ifindex == ifindex &&
+ if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
}
@@ -2657,11 +2684,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct in6_addr *gw;
- int ifindex;
+ int ifindex, weight;
ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
+ weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
- if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
+ if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
+ weight))
return false;
}
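/* Editor's note: with weight now part of the match, two IPv6 routes whose
 * nexthops share gateway and ifindex but carry different ECMP weights (say,
 * weight 1 vs. weight 2 on the same link) no longer alias to one nexthop
 * group; each gets a group programmed with its own weights.
 */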
@@ -4192,68 +4221,66 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
}
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- /* Since the tree is shared between all virtual routers we must
- * make sure it contains all the required prefix lengths. This
- * can be computed by either adding the new prefix length to the
- * existing prefix usage of a bound tree, or by aggregating the
- * prefix lengths across all virtual routers and adding the new
- * one as well.
- */
- if (fib->lpm_tree)
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &fib->lpm_tree->prefix_usage);
- else
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
+ if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ goto out;
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
fib->proto);
if (IS_ERR(lpm_tree))
return PTR_ERR(lpm_tree);
- if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
- return 0;
-
err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
if (err)
- return err;
+ goto err_lpm_tree_replace;
+out:
+ lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
return 0;
-}
-static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib)
-{
- if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
- return;
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
- fib->lpm_tree = NULL;
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ return err;
}
-static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
+static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
{
- unsigned char prefix_len = fib_node->key.prefix_len;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
struct mlxsw_sp_fib *fib = fib_node->fib;
+ int err;
- if (fib->prefix_ref_count[prefix_len]++ == 0)
- mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
-}
+ if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ return;
+	/* Try to construct a new LPM tree from the current prefix usage
+	 * minus the prefix length that is no longer used. If that fails,
+	 * continue using the old tree.
+ */
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
+ fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return;
-static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
-{
- unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->fib;
+ err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
+ if (err)
+ goto err_lpm_tree_replace;
- if (--fib->prefix_ref_count[prefix_len] == 0)
- mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ return;
+
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
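/* Editor's sketch of the new bookkeeping: prefix_ref_count[] now lives in
 * the shared per-protocol tree. Inserting the first /24 route bumps
 * prefix_ref_count[24] from zero, which grows the tree to cover prefix
 * length 24; deleting the last /24 drops it back to zero, and the unlink
 * path above opportunistically migrates the VRs to a smaller tree, keeping
 * the current one if no replacement can be built.
 */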
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
@@ -4267,12 +4294,10 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
return err;
fib_node->fib = fib;
- err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
+ err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
if (err)
goto err_fib_lpm_tree_link;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
-
return 0;
err_fib_lpm_tree_link:
@@ -4286,8 +4311,7 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib *fib = fib_node->fib;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
- mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
+ mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
fib_node->fib = NULL;
mlxsw_sp_fib_node_remove(fib, fib_node);
}
@@ -4326,7 +4350,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
err_fib_node_init:
mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -4339,7 +4363,7 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static struct mlxsw_sp_fib4_entry *
@@ -4764,7 +4788,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev = rt->dst.dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = 1;
+ nh->nh_weight = rt->rt6i_nh_weight;
memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
@@ -5362,7 +5386,7 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int
@@ -5399,7 +5423,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -6048,7 +6072,7 @@ err_fid_get:
err_rif_alloc:
err_rif_index_alloc:
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -6071,7 +6095,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_put(fid);
kfree(rif);
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static void
@@ -6861,7 +6885,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
return 0;
err_loopback_op:
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
return err;
}
@@ -6875,7 +6899,7 @@ static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
--ul_vr->rif_count;
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
@@ -7010,6 +7034,24 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
}
#endif
+static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rdpm_pl[MLXSW_REG_RDPM_LEN];
+ unsigned int i;
+
+ MLXSW_REG_ZERO(rdpm, rdpm_pl);
+
+	/* The HW determines switch priority from the DSCP bits, while the
+	 * kernel still derives it from the full ToS byte. Since the fields
+	 * are offset by two bits, translate each DSCP value to the ToS value
+	 * the kernel would observe, skipping the 2 least-significant ECN bits.
+ */
+ for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+ mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
+}
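/* Editor's illustration: DSCP occupies the upper six bits of the IPv4 ToS /
 * IPv6 Traffic Class byte and the lower two bits are ECN, hence the i << 2.
 * E.g. DSCP 46 (Expedited Forwarding) is registered as
 * rt_tos2priority(46 << 2), the priority the kernel would derive from a ToS
 * byte of 0xb8.
 */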
+
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -7022,6 +7064,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
@@ -7097,6 +7140,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_mp_hash_init;
+ err = mlxsw_sp_dscp_init(mlxsw_sp);
+ if (err)
+ goto err_dscp_init;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
@@ -7106,6 +7153,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_register_fib_notifier:
+err_dscp_init:
err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 2fe96f1f3fe5..bd6e9014bc74 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -28,6 +28,7 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include "ks8851.h"
@@ -407,15 +408,23 @@ static void ks8851_read_mac_addr(struct net_device *dev)
* @ks: The device structure
*
* Get or create the initial mac address for the device and then set that
- * into the station address register. If there is an EEPROM present, then
+ * into the station address register. A mac address supplied in the device
+ * tree takes precedence. Otherwise, if there is an EEPROM present, then
* we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
{
struct net_device *dev = ks->netdev;
+ const u8 *mac_addr;
+
+ mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ ks8851_write_mac_addr(dev);
+ return;
+ }
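/* Editor's note: of_get_mac_address() reads the standard "mac-address" and
 * "local-mac-address" device-tree properties, so a board can pin the address
 * from its SPI child node, e.g. local-mac-address = [00 11 22 33 44 55];
 * (property names per the generic of_net binding; the example address is
 * made up).
 */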
- /* first, try reading what we've got already */
if (ks->rc_ccr & CCR_EEPROM) {
ks8851_read_mac_addr(dev);
if (is_valid_ether_addr(dev->dev_addr))
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 24c4408b5734..d5866d708dfa 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -22,6 +22,8 @@ nfp-objs := \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
+ nfp_net_ctrl.o \
+ nfp_net_debugdump.o \
nfp_net_ethtool.o \
nfp_net_main.o \
nfp_net_repr.o \
@@ -43,6 +45,7 @@ endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
+ bpf/cmsg.o \
bpf/main.o \
bpf/offload.o \
bpf/verifier.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
new file mode 100644
index 000000000000..80d3aa0fc9d3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/jiffies.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "fw.h"
+#include "main.h"
+
+#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
+#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
+{
+ u16 used_tags;
+
+ used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
+
+ return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
+}
+
+static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
+{
+	/* All FW communication for BPF is request-reply. To make sure a
+	 * message ID is not reused too soon after a timeout, limit the
+	 * number of requests in flight.
+ */
+ if (nfp_bpf_all_tags_busy(bpf)) {
+ cmsg_warn(bpf, "all FW request contexts busy!\n");
+ return -EAGAIN;
+ }
+
+ WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
+ return bpf->tag_alloc_next++;
+}
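/* Editor's sketch of the tag window: tag_alloc_next and tag_alloc_last are
 * u16, so the subtraction in nfp_bpf_all_tags_busy() survives wraparound,
 * e.g. last == 65530 and next == 10 gives used_tags == 16 (mod 2^16). With
 * NFP_BPF_TAG_ALLOC_SPAN == U16_MAX / 4, at most ~16k requests can be in
 * flight before allocation fails with -EAGAIN, keeping recently timed-out
 * IDs out of circulation.
 */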
+
+static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
+
+ while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
+ bpf->tag_alloc_last != bpf->tag_alloc_next)
+ bpf->tag_alloc_last++;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ skb_put(skb, size);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+ unsigned int size;
+
+ size = sizeof(struct cmsg_req_map_op);
+ size += sizeof(struct cmsg_key_value_pair) * n;
+
+ return nfp_bpf_cmsg_alloc(bpf, size);
+}
+
+static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
+{
+ struct cmsg_hdr *hdr;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+
+ return be16_to_cpu(hdr->tag);
+}
+
+static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ unsigned int msg_tag;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&bpf->cmsg_replies, skb) {
+ msg_tag = nfp_bpf_cmsg_get_tag(skb);
+ if (msg_tag == tag) {
+ nfp_bpf_free_tag(bpf, tag);
+ __skb_unlink(skb, &bpf->cmsg_replies);
+ return skb;
+ }
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ if (!skb)
+ nfp_bpf_free_tag(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
+ int tag)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_bpf_reply(bpf, tag);
+ if (skb)
+ return skb;
+ }
+
+ err = wait_event_interruptible_timeout(bpf->cmsg_wq,
+ skb = nfp_bpf_reply(bpf, tag),
+ msecs_to_jiffies(5000));
+	/* We didn't get a response - try one last time and atomically drop
+	 * the tag even if no response is matched.
+ */
+ if (!skb)
+ skb = nfp_bpf_reply_drop_tag(bpf, tag);
+ if (err < 0) {
+ cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
+			  err == -ERESTARTSYS ? "interrupted" : "error",
+ type, err);
+ return ERR_PTR(err);
+ }
+ if (!skb) {
+ cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
+ type);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+
+ return skb;
+}
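/* Editor's note on the wait strategy: the reply is first busy-polled for
 * about 200us (50 * udelay(4)) to catch fast firmware responses cheaply,
 * then the caller sleeps for up to 5s; a final nfp_bpf_reply_drop_tag()
 * reclaims the tag atomically even if the reply never arrives.
 */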
+
+static struct sk_buff *
+nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
+ enum nfp_bpf_cmsg_type type, unsigned int reply_size)
+{
+ struct cmsg_hdr *hdr;
+ int tag;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ tag = nfp_bpf_alloc_tag(bpf);
+ if (tag < 0) {
+ nfp_ctrl_unlock(bpf->app->ctrl);
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(tag);
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = CMSG_MAP_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(tag);
+
+ __nfp_app_ctrl_tx(bpf->app, skb);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
+ if (IS_ERR(skb))
+ return skb;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+ /* 0 reply_size means caller will do the validation */
+ if (reply_size && skb->len != reply_size) {
+ cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
+ skb->len, reply_size);
+ goto err_free;
+ }
+ if (hdr->type != __CMSG_REPLY(type)) {
+ cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+ hdr->type, __CMSG_REPLY(type));
+ goto err_free;
+ }
+
+ return skb;
+err_free:
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(-EIO);
+}
+
+static int
+nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
+ struct cmsg_reply_map_simple *reply)
+{
+ static const int res_table[] = {
+ [CMSG_RC_SUCCESS] = 0,
+ [CMSG_RC_ERR_MAP_FD] = -EBADFD,
+ [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
+ [CMSG_RC_ERR_MAP_ERR] = -EINVAL,
+ [CMSG_RC_ERR_MAP_PARSE] = -EIO,
+ [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
+ [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
+ [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
+ };
+ u32 rc;
+
+ rc = be32_to_cpu(reply->rc);
+ if (rc >= ARRAY_SIZE(res_table)) {
+ cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
+ return -EIO;
+ }
+
+ return res_table[rc];
+}
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
+{
+ struct cmsg_reply_map_alloc_tbl *reply;
+ struct cmsg_req_map_alloc_tbl *req;
+ struct sk_buff *skb;
+ u32 tid;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->key_size = cpu_to_be32(map->key_size);
+ req->value_size = cpu_to_be32(map->value_size);
+ req->max_entries = cpu_to_be32(map->max_entries);
+ req->map_type = cpu_to_be32(map->map_type);
+ req->map_flags = 0;
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
+ sizeof(*reply));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ tid = be32_to_cpu(reply->tid);
+ dev_consume_skb_any(skb);
+
+ return tid;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
+{
+ struct cmsg_reply_map_free_tbl *reply;
+ struct cmsg_req_map_free_tbl *req;
+ struct sk_buff *skb;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb) {
+ cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
+ return;
+ }
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
+ sizeof(*reply));
+ if (IS_ERR(skb)) {
+ cmsg_warn(bpf, "leaking map - I/O error\n");
+ return;
+ }
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
+
+ dev_consume_skb_any(skb);
+}
+
+static int
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
+ enum nfp_bpf_cmsg_type op,
+ u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+ struct nfp_app_bpf *bpf = nfp_map->bpf;
+ struct bpf_map *map = &offmap->map;
+ struct cmsg_reply_map_op *reply;
+ struct cmsg_req_map_op *req;
+ struct sk_buff *skb;
+ int err;
+
+ /* FW messages have no space for more than 32 bits of flags */
+ if (flags >> 32)
+ return -EOPNOTSUPP;
+
+ skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+ req->count = cpu_to_be32(1);
+ req->flags = cpu_to_be32(flags);
+
+ /* Copy inputs */
+ if (key)
+ memcpy(&req->elem[0].key, key, map->key_size);
+ if (value)
+ memcpy(&req->elem[0].value, value, map->value_size);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
+ sizeof(*reply) + sizeof(*reply->elem));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ /* Copy outputs */
+ if (out_key)
+ memcpy(out_key, &reply->elem[0].key, map->key_size);
+ if (out_value)
+ memcpy(out_value, &reply->elem[0].value, map->value_size);
+
+ dev_consume_skb_any(skb);
+
+ return 0;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+ key, value, flags, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+ key, NULL, 0, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+ key, NULL, 0, NULL, value);
+}
+
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+ NULL, NULL, 0, next_key, NULL);
+}
+
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+ key, NULL, 0, next_key, NULL);
+}
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+ unsigned int tag;
+
+ if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
+ cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
+ goto err_free;
+ }
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+
+ tag = nfp_bpf_cmsg_get_tag(skb);
+ if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
+ cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
+ tag);
+ goto err_unlock;
+ }
+
+ __skb_queue_tail(&bpf->cmsg_replies, skb);
+ wake_up_interruptible_all(&bpf->cmsg_wq);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return;
+err_unlock:
+ nfp_ctrl_unlock(bpf->app->ctrl);
+err_free:
+ dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
new file mode 100644
index 000000000000..cfcc7bcb2c67
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_BPF_FW_H
+#define NFP_BPF_FW_H 1
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+enum bpf_cap_tlv_type {
+ NFP_BPF_CAP_TYPE_FUNC = 1,
+ NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
+ NFP_BPF_CAP_TYPE_MAPS = 3,
+};
+
+struct nfp_bpf_cap_tlv_func {
+ __le32 func_id;
+ __le32 func_addr;
+};
+
+struct nfp_bpf_cap_tlv_adjust_head {
+ __le32 flags;
+ __le32 off_min;
+ __le32 off_max;
+ __le32 guaranteed_sub;
+ __le32 guaranteed_add;
+};
+
+#define NFP_BPF_ADJUST_HEAD_NO_META BIT(0)
+
+struct nfp_bpf_cap_tlv_maps {
+ __le32 types;
+ __le32 max_maps;
+ __le32 max_elems;
+ __le32 max_key_sz;
+ __le32 max_val_sz;
+ __le32 max_elem_sz;
+};
+
+/*
+ * Types defined for map related control messages
+ */
+#define CMSG_MAP_ABI_VERSION 1
+
+enum nfp_bpf_cmsg_type {
+ CMSG_TYPE_MAP_ALLOC = 1,
+ CMSG_TYPE_MAP_FREE = 2,
+ CMSG_TYPE_MAP_LOOKUP = 3,
+ CMSG_TYPE_MAP_UPDATE = 4,
+ CMSG_TYPE_MAP_DELETE = 5,
+ CMSG_TYPE_MAP_GETNEXT = 6,
+ CMSG_TYPE_MAP_GETFIRST = 7,
+ __CMSG_TYPE_MAP_MAX,
+};
+
+#define CMSG_TYPE_MAP_REPLY_BIT 7
+#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
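+/* e.g. __CMSG_REPLY(CMSG_TYPE_MAP_ALLOC) == 0x81 */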
+
+#define CMSG_MAP_KEY_LW 16
+#define CMSG_MAP_VALUE_LW 16
+
+enum nfp_bpf_cmsg_status {
+ CMSG_RC_SUCCESS = 0,
+ CMSG_RC_ERR_MAP_FD = 1,
+ CMSG_RC_ERR_MAP_NOENT = 2,
+ CMSG_RC_ERR_MAP_ERR = 3,
+ CMSG_RC_ERR_MAP_PARSE = 4,
+ CMSG_RC_ERR_MAP_EXIST = 5,
+ CMSG_RC_ERR_MAP_NOMEM = 6,
+ CMSG_RC_ERR_MAP_E2BIG = 7,
+};
+
+struct cmsg_hdr {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+};
+
+struct cmsg_reply_map_simple {
+ struct cmsg_hdr hdr;
+ __be32 rc;
+};
+
+struct cmsg_req_map_alloc_tbl {
+ struct cmsg_hdr hdr;
+ __be32 key_size; /* in bytes */
+ __be32 value_size; /* in bytes */
+ __be32 max_entries;
+ __be32 map_type;
+ __be32 map_flags; /* reserved */
+};
+
+struct cmsg_reply_map_alloc_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 tid;
+};
+
+struct cmsg_req_map_free_tbl {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+};
+
+struct cmsg_reply_map_free_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+};
+
+struct cmsg_key_value_pair {
+ __be32 key[CMSG_MAP_KEY_LW];
+ __be32 value[CMSG_MAP_VALUE_LW];
+};
+
+struct cmsg_req_map_op {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+ __be32 count;
+ __be32 flags;
+ struct cmsg_key_value_pair elem[0];
+};
+
+struct cmsg_reply_map_op {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+ __be32 resv;
+ struct cmsg_key_value_pair elem[0];
+};
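+
+/* A minimal sizing sketch for batched map-op messages (an illustrative
+ * helper, not one the driver defines): a request carries @n
+ * cmsg_key_value_pair elements after the fixed header.
+ */
+static inline unsigned int cmsg_map_op_req_size(unsigned int n)
+{
+ return sizeof(struct cmsg_req_map_op) +
+ n * sizeof(struct cmsg_key_value_pair);
+}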
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 995e95410b11..56451edf01c2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) "NFP net bpf: " fmt
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@@ -66,12 +67,6 @@
next2 = nfp_meta_next(next))
static bool
-nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return meta->l.next != &nfp_prog->insns;
-}
-
-static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return meta->l.prev != &nfp_prog->insns;
@@ -90,19 +85,25 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
- return nfp_prog->start_off + nfp_prog->prog_len;
+ return nfp_prog->prog_len;
}
-static unsigned int
-nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+static bool
+nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
- return offset - nfp_prog->start_off;
+ /* If there is a recorded error we may have dropped instructions;
+ * that doesn't have to be due to a translator bug, and the translation
+ * will fail anyway, so just return OK.
+ */
+ if (nfp_prog->error)
+ return true;
+ return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}
/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+ u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
{
enum cmd_ctx_swap ctx;
u64 insn;
@@ -120,14 +121,15 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
FIELD_PREP(OP_CMD_CNT, size) |
FIELD_PREP(OP_CMD_SIG, sync) |
FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+ FIELD_PREP(OP_CMD_INDIR, indir) |
FIELD_PREP(OP_CMD_MODE, mode);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
+emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
{
struct nfp_insn_re_regs reg;
int err;
@@ -148,7 +150,22 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
return;
}
- __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+ __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
+ indir);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync)
+{
+ emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
+}
+
+static void
+emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync)
+{
+ emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
}
static void
@@ -172,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
nfp_prog_push(nfp_prog, insn);
}
-static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+static void
+emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
+ enum nfp_relo_type relo)
{
- if (defer > 2) {
+ if (mask == BR_UNC && defer > 2) {
pr_err("BUG: branch defer out of bounds %d\n", defer);
nfp_prog->error = -EFAULT;
return;
}
- __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
}
static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
- __emit_br(nfp_prog, mask,
- mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
- BR_CSS_NONE, addr, defer);
+ emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}
static void
@@ -230,9 +253,11 @@ emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
return;
}
- __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
- invert, shift, reg.wr_both,
- reg.dst_lmextn, reg.src_lmextn);
+ /* Use reg.dst when destination is No-Dest. */
+ __emit_immed(nfp_prog,
+ swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
+ reg.breg, imm >> 8, width, invert, shift,
+ reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}
static void
@@ -458,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
}
+static void
+wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
+ enum nfp_relo_type relo)
+{
+ if (imm > 0xffff) {
+ pr_err("relocation of a large immediate!\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
+}
+
/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
* If the @imm is small enough encode it directly in operand and return
* otherwise load @imm to a spare register and return its encoding.
@@ -490,24 +530,179 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
emit_nop(nfp_prog);
}
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+ wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+}
+
+/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
+ * result to @dst from low end.
+ */
static void
-wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
- enum br_special special)
+wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
+ u8 offset)
{
- emit_br(nfp_prog, mask, 0, 0);
+ enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
+ u8 mask = (1 << field_len) - 1;
- nfp_prog->prog[nfp_prog->prog_len - 1] |=
- FIELD_PREP(OP_BR_SPECIAL, special);
+ emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}
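+
+/* For example (illustrative values): wrp_reg_subpart(nfp_prog, dst, src,
+ * 2, 1) right-shifts @src by 8 bits and zero-extends the low two bytes
+ * into @dst, i.e. dst = (src >> 8) & 0xffff.
+ */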
-static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+static void
+addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ swreg *rega, swreg *regb)
{
- emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+ if (offset == reg_imm(0)) {
+ *rega = reg_a(src_gpr);
+ *regb = reg_b(src_gpr + 1);
+ return;
+ }
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
+ reg_imm(0));
+ *rega = imm_a(nfp_prog);
+ *regb = imm_b(nfp_prog);
}
-static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+/* NFP has a Command Push Pull bus which supports bulk memory operations. */
+static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+ bool descending_seq = meta->ldst_gather_len < 0;
+ s16 len = abs(meta->ldst_gather_len);
+ swreg src_base, off;
+ bool src_40bit_addr;
+ unsigned int i;
+ u8 xfer_num;
+
+ off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+ src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
+ src_base = reg_a(meta->insn.src_reg * 2);
+ xfer_num = round_up(len, 4) / 4;
+
+ if (src_40bit_addr)
+ addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ &off);
+
+ /* Setup PREV_ALU fields to override memory read length. */
+ if (len > 32)
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+
+ /* Memory read from source addr into transfer-in registers. */
+ emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+ src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+ src_base, off, xfer_num - 1, true, len > 32);
+
+ /* Move from transfer-in to transfer-out. */
+ for (i = 0; i < xfer_num; i++)
+ wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
+
+ off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
+
+ if (len <= 8) {
+ /* Use single direct_ref write8. */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
+ true);
+ } else if (len <= 32 && IS_ALIGNED(len, 4)) {
+ /* Use single direct_ref write32. */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
+ true);
+ } else if (len <= 32) {
+ /* Use single indirect_ref write8. */
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ len - 1, true);
+ } else if (IS_ALIGNED(len, 4)) {
+ /* Use single indirect_ref write32. */
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ xfer_num - 1, true);
+ } else if (len <= 40) {
+ /* Use one direct_ref write32 to write the first 32 bytes, then
+ * another direct_ref write8 to write the remaining bytes.
+ */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, 7,
+ true);
+
+ off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
+ imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
+ reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
+ true);
+ } else {
+ /* Use one indirect_ref write32 to write the 4-byte aligned length,
+ * then another direct_ref write8 to write the remaining bytes.
+ */
+ u8 new_off;
+
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ xfer_num - 2, true);
+ new_off = meta->paired_st->off + (xfer_num - 1) * 4;
+ off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
+ xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
+ (len & 0x3) - 1, true);
+ }
+
+ /* TODO: The following extra load is to make sure the data flow is
+ * identical before and after we do the memory copy optimization.
+ *
+ * The load destination register is not guaranteed to be dead, so we
+ * need to make sure it is loaded with the value the same as before
+ * this transformation.
+ *
+ * These extra loads could be removed once we have accurate register
+ * usage information.
+ */
+ if (descending_seq)
+ xfer_num = 0;
+ else if (BPF_SIZE(meta->insn.code) != BPF_DW)
+ xfer_num = xfer_num - 1;
+ else
+ xfer_num = xfer_num - 2;
+
+ switch (BPF_SIZE(meta->insn.code)) {
+ case BPF_B:
+ wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num), 1,
+ IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
+ break;
+ case BPF_H:
+ wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num), 2, (len & 3) ^ 2);
+ break;
+ case BPF_W:
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(0));
+ break;
+ case BPF_DW:
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num));
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
+ reg_xfer(xfer_num + 1));
+ break;
+ }
+
+ if (BPF_SIZE(meta->insn.code) != BPF_DW)
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
}
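+
+/* A worked example of the write selection above (illustrative numbers):
+ * for a gathered copy of len = 50, xfer_num = 13; the length is neither
+ * <= 40 nor 4-byte aligned, so the final branch emits one indirect_ref
+ * write32 covering the first 48 bytes (xfer_num - 2 == 11, i.e. 12
+ * transfer registers) and a trailing direct_ref write8 for the remaining
+ * (len & 0x3) == 2 bytes.
+ */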
static int
@@ -540,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, int size)
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
+ swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
- /* We load the value from the address indicated in @offset and then
+ /* We load the value from the address indicated in rreg + lreg and then
* mask out the data we don't need. Note: this is little endian!
*/
sz = max(size, 4);
mask = size < 4 ? GENMASK(size - 1, 0) : 0;
- emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
- reg_a(src_gpr), offset, sz / 4 - 1, true);
+ emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
+ lreg, rreg, sz / 4 - 1, true);
i = 0;
if (mask)
@@ -570,6 +765,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
}
static int
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
+ size, CMD_MODE_32b);
+}
+
+static int
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ swreg rega, regb;
+
+ addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
+
+ return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ size, CMD_MODE_40b_BA);
+}
+
+static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -583,7 +798,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
emit_alu(nfp_prog, reg_none(),
plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
@@ -596,7 +811,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Check packet length */
tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
@@ -975,9 +1190,6 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
@@ -995,9 +1207,6 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
if (!swap)
emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
@@ -1027,9 +1236,6 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (swap) {
areg ^= breg;
breg ^= areg;
@@ -1052,6 +1258,136 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
SHF_SC_R_ROT, 16);
}
+static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
+ struct nfp_bpf_cap_adjust_head *adjust_head;
+ u32 ret_einval, end;
+
+ adjust_head = &nfp_prog->bpf->adjust_head;
+
+ /* Optimized version - 5 vs 14 cycles */
+ if (nfp_prog->adjust_head_location != UINT_MAX) {
+ if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
+ return -EINVAL;
+
+ emit_alu(nfp_prog, pptr_reg(nfp_prog),
+ reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ /* TODO: when adjust head is guaranteed to succeed we can
+ * also eliminate the following if (r0 == 0) branch.
+ */
+
+ return 0;
+ }
+
+ ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
+ end = ret_einval + 2;
+
+ /* We need to use a temp because offset is just a part of the pkt ptr */
+ emit_alu(nfp_prog, tmp,
+ reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
+
+ /* Validate result will fit within FW datapath constraints */
+ emit_alu(nfp_prog, reg_none(),
+ tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
+ emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+ emit_alu(nfp_prog, reg_none(),
+ reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
+ emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+
+ /* Validate the length is at least ETH_HLEN */
+ emit_alu(nfp_prog, tmp_len,
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, reg_none(),
+ tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
+ emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+ /* Load the ret code */
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ /* Modify the packet metadata */
+ emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
+
+ /* Skip over the -EINVAL ret code (defer 2) */
+ emit_br(nfp_prog, BR_UNC, end, 2);
+
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+ /* return -EINVAL target */
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+ return -EINVAL;
+
+ wrp_immed(nfp_prog, reg_both(0), -22);
+ wrp_immed(nfp_prog, reg_both(1), ~0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+ return -EINVAL;
+
+ return 0;
+}
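+
+/* Note on the constants above: ret_einval is current offset + 14 because,
+ * assuming every emitter in the slow path produces a single instruction,
+ * exactly 14 instructions separate it from the -EINVAL target, and
+ * end = ret_einval + 2 covers the two immediates loading the return code;
+ * nfp_prog_confirm_current_offset() verifies both assumptions at
+ * translation time.
+ */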
+
+static int
+map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ struct bpf_offloaded_map *offmap;
+ struct nfp_bpf_map *nfp_map;
+ bool load_lm_ptr;
+ u32 ret_tgt;
+ s64 lm_off;
+ swreg tid;
+
+ offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
+ nfp_map = offmap->dev_priv;
+
+ /* We only have to reload LM0 if the key is not at start of stack */
+ lm_off = nfp_prog->stack_depth;
+ lm_off += meta->arg2.var_off.value + meta->arg2.off;
+ load_lm_ptr = meta->arg2_var_off || lm_off;
+
+ /* Set LM0 to start of key */
+ if (load_lm_ptr)
+ emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
+
+ /* Load the map ID into a register; it should actually fit as an
+ * immediate, but in case it doesn't, deal with it here rather than in
+ * the delay slots.
+ */
+ tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
+
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
+ 2, RELO_BR_HELPER);
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+
+ /* Load map ID into A0 */
+ wrp_mov(nfp_prog, reg_a(0), tid);
+
+ /* Load the return address into B0 */
+ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ /* Reset the LM0 pointer */
+ if (!load_lm_ptr)
+ return 0;
+
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
+ wrp_nops(nfp_prog, 3);
+
+ return 0;
+}
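+
+/* Calling convention for the firmware helper used above, as set up in the
+ * branch delay slots: A0 carries the map ID, B0 the return address; the
+ * jump target itself is filled in later by the RELO_BR_HELPER relocation.
+ */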
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1486,14 +1822,29 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
- meta->insn.dst_reg * 2, size);
+ return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg tmp_reg;
+
+ tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
}
static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
{
+ if (meta->ldst_gather_len)
+ return nfp_cpp_memcpy(nfp_prog, meta);
+
if (meta->ptr.type == PTR_TO_CTX) {
if (nfp_prog->type == BPF_PROG_TYPE_XDP)
return mem_ldx_xdp(nfp_prog, meta, size);
@@ -1508,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return mem_ldx_stack(nfp_prog, meta, size,
meta->ptr.off + meta->ptr.var_off.value);
+ if (meta->ptr.type == PTR_TO_MAP_VALUE)
+ return mem_ldx_emem(nfp_prog, meta, size);
+
return -EOPNOTSUPP;
}
@@ -1630,8 +1984,6 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- if (meta->insn.off < 0) /* TODO */
- return -EOPNOTSUPP;
emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
return 0;
@@ -1646,9 +1998,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
or1 = reg_a(insn->dst_reg * 2);
or2 = reg_b(insn->dst_reg * 2 + 1);
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (imm & ~0U) {
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
emit_alu(nfp_prog, imm_a(nfp_prog),
@@ -1689,15 +2038,32 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
+static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
+}
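+
+/* The signed compares map onto the two signed branch conditions by
+ * swapping operands where needed:
+ *
+ * JSGT a, b -> b < a (BR_BLT, swapped)
+ * JSGE a, b -> a >= b (BR_BGE)
+ * JSLT a, b -> a < b (BR_BLT)
+ * JSLE a, b -> b >= a (BR_BGE, swapped)
+ */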
+
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (!imm) {
meta->skip = true;
return 0;
@@ -1726,9 +2092,6 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (!imm) {
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
@@ -1753,9 +2116,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
@@ -1787,6 +2147,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
+static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
+}
+
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -1797,9 +2177,22 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ switch (meta->insn.imm) {
+ case BPF_FUNC_xdp_adjust_head:
+ return adjust_head(nfp_prog, meta);
+ case BPF_FUNC_map_lookup_elem:
+ return map_lookup_stack(nfp_prog, meta);
+ default:
+ WARN_ONCE(1, "verifier allowed unsupported function\n");
+ return -EOPNOTSUPP;
+ }
+}
+
static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
return 0;
}
@@ -1860,6 +2253,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
[BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
[BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
+ [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
+ [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
+ [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
+ [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
@@ -1867,99 +2264,64 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
[BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
[BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
+ [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
+ [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
+ [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
+ [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP | BPF_CALL] = call,
[BPF_JMP | BPF_EXIT] = goto_out,
};
-/* --- Misc code --- */
-static void br_set_offset(u64 *instr, u16 offset)
-{
- u16 addr_lo, addr_hi;
-
- addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
- addr_hi = offset != addr_lo;
- *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
- *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
- *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
-}
-
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
- struct nfp_insn_meta *meta, *next;
- u32 off, br_idx;
- u32 idx;
+ struct nfp_insn_meta *meta, *jmp_dst;
+ u32 idx, br_idx;
- nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
continue;
+ if (meta->insn.code == (BPF_JMP | BPF_CALL))
+ continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
continue;
- br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+ if (list_is_last(&meta->l, &nfp_prog->insns))
+ br_idx = nfp_prog->last_bpf_off;
+ else
+ br_idx = list_next_entry(meta, l)->off - 1;
+
if (!nfp_is_br(nfp_prog->prog[br_idx])) {
pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
return -ELOOP;
}
/* Leave special branches for later */
- if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+ RELO_BR_REL)
continue;
- /* Find the target offset in assembler realm */
- off = meta->insn.off;
- if (!off) {
- pr_err("Fixup found zero offset!!\n");
+ if (!meta->jmp_dst) {
+ pr_err("Non-exit jump doesn't have destination info recorded!!\n");
return -ELOOP;
}
- while (off && nfp_meta_has_next(nfp_prog, next)) {
- next = nfp_meta_next(next);
- off--;
- }
- if (off) {
- pr_err("Fixup found too large jump!! %d\n", off);
- return -ELOOP;
- }
+ jmp_dst = meta->jmp_dst;
- if (next->skip) {
+ if (jmp_dst->skip) {
pr_err("Branch landing on removed instruction!!\n");
return -ELOOP;
}
- for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
- idx <= br_idx; idx++) {
+ for (idx = meta->off; idx <= br_idx; idx++) {
if (!nfp_is_br(nfp_prog->prog[idx]))
continue;
- br_set_offset(&nfp_prog->prog[idx], next->off);
+ br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
}
}
- /* Fixup 'goto out's separately, they can be scattered around */
- for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
- enum br_special special;
-
- if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
- continue;
-
- special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
- switch (special) {
- case OP_BR_NORMAL:
- break;
- case OP_BR_GO_OUT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_out);
- break;
- case OP_BR_GO_ABORT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_abort);
- break;
- }
-
- nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
- }
-
return 0;
}
@@ -1987,7 +2349,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
@@ -2014,7 +2376,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
emit_shf(nfp_prog, reg_b(2),
reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
@@ -2033,7 +2395,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2054,7 +2416,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2105,6 +2467,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
nfp_prog->n_translated++;
}
+ nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
+
nfp_outro(nfp_prog);
if (nfp_prog->error)
return nfp_prog->error;
@@ -2173,6 +2537,9 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
if (next.src_reg || next.dst_reg)
continue;
+ if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
+ continue;
+
meta2->skip = true;
}
}
@@ -2209,40 +2576,294 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
if (next1.imm != 0x20 || next2.imm != 0x20)
continue;
+ if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
+ meta3->flags & FLAG_INSN_IS_JUMP_DST)
+ continue;
+
meta2->skip = true;
meta3->skip = true;
}
}
+/* A load/store pair that forms a memory copy should look like the following:
+ *
+ * ld_width R, [addr_src + offset_src]
+ * st_width [addr_dest + offset_dest], R
+ *
+ * The destination register of the load and the source register of the
+ * store should be the same, and the load and store should also operate
+ * at the same width. If either addr_src or addr_dest is the stack
+ * pointer, we don't do the CPP optimization, as the stack is modelled
+ * by registers on the NFP.
+ */
+static bool
+curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
+ struct nfp_insn_meta *st_meta)
+{
+ struct bpf_insn *ld = &ld_meta->insn;
+ struct bpf_insn *st = &st_meta->insn;
+
+ if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
+ return false;
+
+ if (ld_meta->ptr.type != PTR_TO_PACKET)
+ return false;
+
+ if (st_meta->ptr.type != PTR_TO_PACKET)
+ return false;
+
+ if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
+ return false;
+
+ if (ld->dst_reg != st->src_reg)
+ return false;
+
+ /* There is a jump to the store insn in this pair. */
+ if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
+ return false;
+
+ return true;
+}
+
+/* Currently, we only support chaining load/store pairs if:
+ *
+ * - Their address base registers are the same.
+ * - Their address offsets are in the same order.
+ * - They operate at the same memory width.
+ * - There is no jump into the middle of them.
+ */
+static bool
+curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
+ struct nfp_insn_meta *st_meta,
+ struct bpf_insn *prev_ld,
+ struct bpf_insn *prev_st)
+{
+ u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
+ struct bpf_insn *ld = &ld_meta->insn;
+ struct bpf_insn *st = &st_meta->insn;
+ s16 prev_ld_off, prev_st_off;
+
+ /* This pair is the start pair. */
+ if (!prev_ld)
+ return true;
+
+ prev_size = BPF_LDST_BYTES(prev_ld);
+ curr_size = BPF_LDST_BYTES(ld);
+ prev_ld_base = prev_ld->src_reg;
+ prev_st_base = prev_st->dst_reg;
+ prev_ld_dst = prev_ld->dst_reg;
+ prev_ld_off = prev_ld->off;
+ prev_st_off = prev_st->off;
+
+ if (ld->dst_reg != prev_ld_dst)
+ return false;
+
+ if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
+ return false;
+
+ if (curr_size != prev_size)
+ return false;
+
+ /* There is a jump to the head of this pair. */
+ if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
+ return false;
+
+ /* Both in ascending order. */
+ if (prev_ld_off + prev_size == ld->off &&
+ prev_st_off + prev_size == st->off)
+ return true;
+
+ /* Both in descending order. */
+ if (ld->off + curr_size == prev_ld_off &&
+ st->off + curr_size == prev_st_off)
+ return true;
+
+ return false;
+}
+
+/* Return TRUE if cross memory access happens. Cross memory access means
+ * the store area overlaps with the load area, so a later load might read
+ * the value from a previous store; in that case we can't treat the
+ * sequence as a memory copy.
+ */
+static bool
+cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
+ struct nfp_insn_meta *head_st_meta)
+{
+ s16 head_ld_off, head_st_off, ld_off;
+
+ /* Different pointer types do not overlap. */
+ if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
+ return false;
+
+ /* load and store are both PTR_TO_PACKET, check ID info. */
+ if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
+ return true;
+
+ /* Canonicalize the offsets - make all of them relative to the
+ * original base register.
+ */
+ head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
+ head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
+ ld_off = ld->off + head_ld_meta->ptr.off;
+
+ /* Ascending order cross. */
+ if (ld_off > head_ld_off &&
+ head_ld_off < head_st_off && ld_off >= head_st_off)
+ return true;
+
+ /* Descending order cross. */
+ if (ld_off < head_ld_off &&
+ head_ld_off > head_st_off && ld_off <= head_st_off)
+ return true;
+
+ return false;
+}
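+
+/* For example (illustrative offsets): with head_ld_off = 0,
+ * head_st_off = 8 and a later load at ld_off = 8, the ascending-order
+ * test fires - the load reads bytes the head store may have written, so
+ * the sequence is not a pure memory copy.
+ */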
+
+/* This pass tries to identify the following instruction sequences.
+ *
+ * load R, [regA + offA]
+ * store [regB + offB], R
+ * load R, [regA + offA + const_imm_A]
+ * store [regB + offB + const_imm_A], R
+ * load R, [regA + offA + 2 * const_imm_A]
+ * store [regB + offB + 2 * const_imm_A], R
+ * ...
+ *
+ * The above sequence is typically generated by the compiler when
+ * lowering memcpy. The NFP prefers using CPP instructions to
+ * accelerate it.
+ */
+static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *head_ld_meta = NULL;
+ struct nfp_insn_meta *head_st_meta = NULL;
+ struct nfp_insn_meta *meta1, *meta2;
+ struct bpf_insn *prev_ld = NULL;
+ struct bpf_insn *prev_st = NULL;
+ u8 count = 0;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ struct bpf_insn *ld = &meta1->insn;
+ struct bpf_insn *st = &meta2->insn;
+
+ /* Reset record status if any of the following is true:
+ * - The current insn pair is not load/store.
+ * - The load/store pair doesn't chain with the previous one.
+ * - The chained load/store pair crosses with the previous pair.
+ * - The chained load/store pair has a total memory copy size
+ * beyond 128 bytes, which is the maximum length a single
+ * NFP CPP command can transfer.
+ */
+ if (!curr_pair_is_memcpy(meta1, meta2) ||
+ !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
+ prev_st) ||
+ (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
+ head_st_meta) ||
+ head_ld_meta->ldst_gather_len >= 128))) {
+ if (!count)
+ continue;
+
+ if (count > 1) {
+ s16 prev_ld_off = prev_ld->off;
+ s16 prev_st_off = prev_st->off;
+ s16 head_ld_off = head_ld_meta->insn.off;
+
+ if (prev_ld_off < head_ld_off) {
+ head_ld_meta->insn.off = prev_ld_off;
+ head_st_meta->insn.off = prev_st_off;
+ head_ld_meta->ldst_gather_len =
+ -head_ld_meta->ldst_gather_len;
+ }
+
+ head_ld_meta->paired_st = &head_st_meta->insn;
+ head_st_meta->skip = true;
+ } else {
+ head_ld_meta->ldst_gather_len = 0;
+ }
+
+ /* If the chain is ended by a load/store pair then this
+ * could serve as the new head of the next chain.
+ */
+ if (curr_pair_is_memcpy(meta1, meta2)) {
+ head_ld_meta = meta1;
+ head_st_meta = meta2;
+ head_ld_meta->ldst_gather_len =
+ BPF_LDST_BYTES(ld);
+ meta1 = nfp_meta_next(meta1);
+ meta2 = nfp_meta_next(meta2);
+ prev_ld = ld;
+ prev_st = st;
+ count = 1;
+ } else {
+ head_ld_meta = NULL;
+ head_st_meta = NULL;
+ prev_ld = NULL;
+ prev_st = NULL;
+ count = 0;
+ }
+
+ continue;
+ }
+
+ if (!head_ld_meta) {
+ head_ld_meta = meta1;
+ head_st_meta = meta2;
+ } else {
+ meta1->skip = true;
+ meta2->skip = true;
+ }
+
+ head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
+ meta1 = nfp_meta_next(meta1);
+ meta2 = nfp_meta_next(meta2);
+ prev_ld = ld;
+ prev_st = st;
+ count++;
+ }
+}
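+
+/* A worked example (illustrative BPF): the byte-copy sequence
+ *
+ * r0 = *(u8 *)(r1 + 0); *(u8 *)(r2 + 0) = r0;
+ * r0 = *(u8 *)(r1 + 1); *(u8 *)(r2 + 1) = r0;
+ * r0 = *(u8 *)(r1 + 2); *(u8 *)(r2 + 2) = r0;
+ *
+ * collapses into the head load with ldst_gather_len = 3 and paired_st
+ * pointing at the head store; the later pairs are marked skip and the
+ * JIT emits a single CPP transfer via nfp_cpp_memcpy().
+ */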
+
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
nfp_bpf_opt_reg_init(nfp_prog);
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
+ nfp_bpf_opt_ldst_gather(nfp_prog);
return 0;
}
-static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
+ __le64 *ustore = (__force __le64 *)prog;
int i;
- for (i = 0; i < nfp_prog->prog_len; i++) {
+ for (i = 0; i < len; i++) {
int err;
- err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ err = nfp_ustore_check_valid_no_ecc(prog[i]);
if (err)
return err;
- nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
-
- ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
}
return 0;
}
+static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
+{
+ void *prog;
+
+ prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
+ if (!prog)
+ return;
+
+ nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
+ memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
+ kvfree(nfp_prog->prog);
+ nfp_prog->prog = prog;
+}
+
int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
@@ -2258,5 +2879,102 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
return -EINVAL;
}
- return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
+ nfp_bpf_prog_trim(nfp_prog);
+
+ return ret;
+}
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+ struct nfp_insn_meta *meta;
+
+ /* Another pass to record jump information. */
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ u64 code = meta->insn.code;
+
+ if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
+ BPF_OP(code) != BPF_CALL) {
+ struct nfp_insn_meta *dst_meta;
+ unsigned short dst_indx;
+
+ dst_indx = meta->n + 1 + meta->insn.off;
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
+ cnt);
+
+ meta->jmp_dst = dst_meta;
+ dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+ }
+ }
+}
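+
+/* For example (illustrative indices): a conditional jump at instruction
+ * index 5 with insn.off = 3 targets index 5 + 1 + 3 = 9, so the meta for
+ * index 9 is recorded as jmp_dst and flagged FLAG_INSN_IS_JUMP_DST.
+ */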
+
+bool nfp_bpf_supported_opcode(u8 code)
+{
+ return !!instr_cb[code];
+}
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
+{
+ unsigned int i;
+ u64 *prog;
+ int err;
+
+ prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
+ GFP_KERNEL);
+ if (!prog)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ enum nfp_relo_type special;
+ u32 val;
+
+ special = FIELD_GET(OP_RELO_TYPE, prog[i]);
+ switch (special) {
+ case RELO_NONE:
+ continue;
+ case RELO_BR_REL:
+ br_add_offset(&prog[i], bv->start_off);
+ break;
+ case RELO_BR_GO_OUT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_out + bv->start_off);
+ break;
+ case RELO_BR_GO_ABORT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_abort + bv->start_off);
+ break;
+ case RELO_BR_NEXT_PKT:
+ br_set_offset(&prog[i], bv->tgt_done);
+ break;
+ case RELO_BR_HELPER:
+ val = br_get_offset(prog[i]);
+ val -= BR_OFF_RELO;
+ switch (val) {
+ case BPF_FUNC_map_lookup_elem:
+ val = nfp_prog->bpf->helpers.map_lookup;
+ break;
+ default:
+ pr_err("relocation of unknown helper %d\n",
+ val);
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ br_set_offset(&prog[i], val);
+ break;
+ case RELO_IMMED_REL:
+ immed_add_value(&prog[i], bv->start_off);
+ break;
+ }
+
+ prog[i] &= ~OP_RELO_TYPE;
+ }
+
+ err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
+ if (err)
+ goto err_free_prog;
+
+ return prog;
+
+err_free_prog:
+ kfree(prog);
+ return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 13190aa09faf..322027792fe8 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -34,10 +34,12 @@
#include <net/pkt_cls.h>
#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
+#include "fw.h"
#include "main.h"
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
@@ -52,7 +54,7 @@ static bool nfp_net_ebpf_capable(struct nfp_net *nn)
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+ struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
bool running, xdp_running;
int ret;
@@ -68,10 +70,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
if (prog && running && !xdp_running)
return -EBUSY;
- ret = nfp_net_bpf_offload(nn, prog, running);
+ ret = nfp_net_bpf_offload(nn, prog, running, extack);
/* Stop offload if replace not possible */
if (ret && prog)
- nfp_bpf_xdp_offload(app, nn, NULL);
+ nfp_bpf_xdp_offload(app, nn, NULL, extack);
nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
@@ -85,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
+ struct nfp_bpf_vnic *bv;
int err;
- nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
- if (!nn->app_priv)
+ bv = kzalloc(sizeof(*bv), GFP_KERNEL);
+ if (!bv)
return -ENOMEM;
+ nn->app_priv = bv;
err = nfp_app_nic_vnic_alloc(app, nn, id);
if (err)
goto err_free_priv;
+ bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
return 0;
err_free_priv:
kfree(nn->app_priv);
@@ -105,8 +112,6 @@ static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
struct nfp_bpf_vnic *bv = nn->app_priv;
- if (nn->dp.bpf_offload_xdp)
- nfp_bpf_xdp_offload(app, nn, NULL);
WARN_ON(bv->tc_prog);
kfree(bv);
}
@@ -120,17 +125,29 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
struct nfp_bpf_vnic *bv;
int err;
- if (type != TC_SETUP_CLSBPF ||
- !tc_can_offload(nn->dp.netdev) ||
- !nfp_net_ebpf_capable(nn) ||
- cls_bpf->common.protocol != htons(ETH_P_ALL) ||
- cls_bpf->common.chain_index)
+ if (type != TC_SETUP_CLSBPF) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only offload of BPF classifiers supported");
+ return -EOPNOTSUPP;
+ }
+ if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
+ return -EOPNOTSUPP;
+ if (!nfp_net_ebpf_capable(nn)) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "NFP firmware does not support eBPF offload");
return -EOPNOTSUPP;
+ }
+ if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only ETH_P_ALL supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
/* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
tcf_exts_has_actions(cls_bpf->exts)) {
- nn_err(nn, "only direct action with no legacy actions supported\n");
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only direct action with no legacy actions supported");
return -EOPNOTSUPP;
}
@@ -147,7 +164,8 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return 0;
}
- err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+ err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
+ cls_bpf->common.extack);
if (err)
return err;
@@ -191,23 +209,215 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
{
- return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+ struct nfp_bpf_vnic *bv = nn->app_priv;
+
+ return !!bv->tc_prog;
+}
+
+static int
+nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int max_mtu;
+
+ if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ return 0;
+
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (new_mtu > max_mtu) {
+ nn_info(nn, "BPF offload active, MTU over %u not supported\n",
+ max_mtu);
+ return -EBUSY;
+ }
+ return 0;
+}
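+
+/* NFP_NET_CFG_BPF_INL_MTU is in units of 64 bytes, with 32 bytes
+ * reserved, hence max_mtu = chunks * 64 - 32; e.g. (illustrative) a
+ * reported value of 48 allows an MTU of up to 48 * 64 - 32 = 3040.
+ */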
+
+static int
+nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
+ struct nfp_cpp *cpp = bpf->app->pf->cpp;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ bpf->adjust_head.flags = readl(&cap->flags);
+ bpf->adjust_head.off_min = readl(&cap->off_min);
+ bpf->adjust_head.off_max = readl(&cap->off_max);
+ bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
+ bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);
+
+ if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
+ nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
+ return -EINVAL;
+ }
+ if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
+ !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
+ nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
+ memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
+ return 0;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_func __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ switch (readl(&cap->func_id)) {
+ case BPF_FUNC_map_lookup_elem:
+ bpf->helpers.map_lookup = readl(&cap->func_addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_maps __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ bpf->maps.types = readl(&cap->types);
+ bpf->maps.max_maps = readl(&cap->max_maps);
+ bpf->maps.max_elems = readl(&cap->max_elems);
+ bpf->maps.max_key_sz = readl(&cap->max_key_sz);
+ bpf->maps.max_val_sz = readl(&cap->max_val_sz);
+ bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
+
+ return 0;
+}
+
+static int nfp_bpf_parse_capabilities(struct nfp_app *app)
+{
+ struct nfp_cpp *cpp = app->pf->cpp;
+ struct nfp_cpp_area *area;
+ u8 __iomem *mem, *start;
+
+ mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
+ 8, &area);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
+
+ start = mem;
+ while (mem - start + 8 < nfp_cpp_area_size(area)) {
+ u8 __iomem *value;
+ u32 type, length;
+
+ type = readl(mem);
+ length = readl(mem + 4);
+ value = mem + 8;
+
+ mem += 8 + length;
+ if (mem - start > nfp_cpp_area_size(area))
+ goto err_release_free;
+
+ switch (type) {
+ case NFP_BPF_CAP_TYPE_FUNC:
+ if (nfp_bpf_parse_cap_func(app->priv, value, length))
+ goto err_release_free;
+ break;
+ case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
+ if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
+ case NFP_BPF_CAP_TYPE_MAPS:
+ if (nfp_bpf_parse_cap_maps(app->priv, value, length))
+ goto err_release_free;
+ break;
+ default:
+ nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
+ break;
+ }
+ }
+ if (mem - start != nfp_cpp_area_size(area)) {
+ nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
+ mem - start, nfp_cpp_area_size(area));
+ goto err_release_free;
+ }
+
+ nfp_cpp_area_release_free(area);
+
+ return 0;
+
+err_release_free:
+ nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
+ nfp_cpp_area_release_free(area);
+ return -EINVAL;
+}
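+
+/* TLV layout walked by the parser above: each entry is a 4-byte type
+ * followed by a 4-byte length and then @length bytes of value; entries
+ * are packed back to back until the end of the rtsym area.
+ */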
+
+static int nfp_bpf_init(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf;
+ int err;
+
+ bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
+ if (!bpf)
+ return -ENOMEM;
+ bpf->app = app;
+ app->priv = bpf;
+
+ skb_queue_head_init(&bpf->cmsg_replies);
+ init_waitqueue_head(&bpf->cmsg_wq);
+ INIT_LIST_HEAD(&bpf->map_list);
+
+ err = nfp_bpf_parse_capabilities(app);
+ if (err)
+ goto err_free_bpf;
+
+ return 0;
+
+err_free_bpf:
+ kfree(bpf);
+ return err;
+}
+
+static void nfp_bpf_clean(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+ WARN_ON(!list_empty(&bpf->map_list));
+ WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+ kfree(bpf);
}
const struct nfp_app_type app_bpf = {
.id = NFP_APP_BPF_NIC,
.name = "ebpf",
+ .ctrl_cap_mask = 0,
+
+ .init = nfp_bpf_init,
+ .clean = nfp_bpf_clean,
+
+ .change_mtu = nfp_bpf_change_mtu,
+
.extra_cap = nfp_bpf_extra_cap,
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
+ .ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
+
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
+ .bpf = nfp_ndo_bpf,
.xdp_offload = nfp_bpf_xdp_offload,
-
- .bpf_verifier_prep = nfp_bpf_verifier_prep,
- .bpf_translate = nfp_bpf_translate,
- .bpf_destroy = nfp_bpf_destroy,
};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 57b6043177a3..424fe8338105 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -37,22 +37,40 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/wait.h>
#include "../nfp_asm.h"
+#include "fw.h"
-/* For branch fixup logic use up-most byte of branch instruction as scratch
+/* For relocation logic use the uppermost byte of the branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
-#define OP_BR_SPECIAL 0xff00000000000000ULL
-
-enum br_special {
- OP_BR_NORMAL = 0,
- OP_BR_GO_OUT,
- OP_BR_GO_ABORT,
+#define OP_RELO_TYPE 0xff00000000000000ULL
+
+enum nfp_relo_type {
+ RELO_NONE = 0,
+ /* standard internal jumps */
+ RELO_BR_REL,
+ /* internal jumps to parts of the outro */
+ RELO_BR_GO_OUT,
+ RELO_BR_GO_ABORT,
+ /* external jumps to fixed addresses */
+ RELO_BR_NEXT_PKT,
+ RELO_BR_HELPER,
+ /* immediate relocation against load address */
+ RELO_IMMED_REL,
};
+/* To make absolute relocated branches (branches other than RELO_BR_REL)
+ * distinguishable in user space dumps from normal jumps, add a large offset
+ * to them.
+ */
+#define BR_OFF_RELO 15000
+
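A minimal sketch of how the scratch byte can be used, assuming the JIT tags
each emitted instruction and the per-vNIC relocation pass strips the tag
before the code is DMAed to HW (the helper names here are hypothetical):

	static inline u64 nfp_tag_relo(u64 insn, enum nfp_relo_type relo)
	{
		return insn | FIELD_PREP(OP_RELO_TYPE, relo);
	}

	static inline enum nfp_relo_type nfp_relo_type(u64 insn)
	{
		return FIELD_GET(OP_RELO_TYPE, insn);
	}

FIELD_PREP()/FIELD_GET() come from <linux/bitfield.h>, which main.h already
includes.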
enum static_regs {
STATIC_REG_IMM = 21, /* Bank AB */
STATIC_REG_STACK = 22, /* Bank A */
@@ -78,6 +96,89 @@ enum pkt_vec {
#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
+/**
+ * struct nfp_app_bpf - bpf app priv structure
+ * @app: backpointer to the app
+ *
+ * @tag_allocator: bitmap of control message tags in use
+ * @tag_alloc_next: next tag bit to allocate
+ * @tag_alloc_last: next tag bit to be freed
+ *
+ * @cmsg_replies: received cmsg replies waiting to be consumed
+ * @cmsg_wq: wait queue where threads wait for cmsg replies
+ *
+ * @map_list: list of offloaded maps
+ * @maps_in_use: number of currently offloaded maps
+ * @map_elems_in_use: number of elements allocated to offloaded maps
+ *
+ * @adjust_head: adjust head capability
+ * @flags: extra flags for adjust head
+ * @off_min: minimal packet offset within buffer required
+ * @off_max: maximum packet offset within buffer required
+ * @guaranteed_sub: amount of negative adjustment guaranteed possible
+ * @guaranteed_add: amount of positive adjustment guaranteed possible
+ *
+ * @maps: map capability
+ * @types: supported map types
+ * @max_maps: max number of maps supported
+ * @max_elems: max number of entries in each map
+ * @max_key_sz: max size of map key
+ * @max_val_sz: max size of map value
+ * @max_elem_sz: max size of map entry (key + value)
+ *
+ * @helpers: helper addresses for various calls
+ * @map_lookup: map lookup helper address
+ */
+struct nfp_app_bpf {
+ struct nfp_app *app;
+
+ DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+ u16 tag_alloc_next;
+ u16 tag_alloc_last;
+
+ struct sk_buff_head cmsg_replies;
+ struct wait_queue_head cmsg_wq;
+
+ struct list_head map_list;
+ unsigned int maps_in_use;
+ unsigned int map_elems_in_use;
+
+ struct nfp_bpf_cap_adjust_head {
+ u32 flags;
+ int off_min;
+ int off_max;
+ int guaranteed_sub;
+ int guaranteed_add;
+ } adjust_head;
+
+ struct {
+ u32 types;
+ u32 max_maps;
+ u32 max_elems;
+ u32 max_key_sz;
+ u32 max_val_sz;
+ u32 max_elem_sz;
+ } maps;
+
+ struct {
+ u32 map_lookup;
+ } helpers;
+};
+
+/**
+ * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
+ * @offmap: pointer to the offloaded BPF map
+ * @bpf: back pointer to bpf app private structure
+ * @tid: table id identifying map on datapath
+ * @l: link on the nfp_app_bpf->map_list list
+ */
+struct nfp_bpf_map {
+ struct bpf_offloaded_map *offmap;
+ struct nfp_app_bpf *bpf;
+ u32 tid;
+ struct list_head l;
+};
+
struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -89,23 +190,47 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
#define nfp_meta_next(meta) list_next_entry(meta, l)
#define nfp_meta_prev(meta) list_prev_entry(meta, l)
+#define FLAG_INSN_IS_JUMP_DST BIT(0)
+
/**
* struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction
* @ptr: pointer type for memory operations
+ * @ldst_gather_len: memcpy length gathered from load/store sequence
+ * @paired_st: the paired store insn at the head of the sequence
* @ptr_not_const: pointer is not always constant
+ * @jmp_dst: destination info for jump instructions
+ * @func_id: function id for call instructions
+ * @arg1: arg1 for call instructions
+ * @arg2: arg2 for call instructions
+ * @arg2_var_off: arg2 changes stack offset on different paths
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
+ * @flags: eBPF instruction extra optimization flags
* @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
*/
struct nfp_insn_meta {
struct bpf_insn insn;
- struct bpf_reg_state ptr;
- bool ptr_not_const;
+ union {
+ struct {
+ struct bpf_reg_state ptr;
+ struct bpf_insn *paired_st;
+ s16 ldst_gather_len;
+ bool ptr_not_const;
+ };
+ struct nfp_insn_meta *jmp_dst;
+ struct {
+ u32 func_id;
+ struct bpf_reg_state arg1;
+ struct bpf_reg_state arg2;
+ bool arg2_var_off;
+ };
+ };
unsigned int off;
unsigned short n;
+ unsigned short flags;
bool skip;
instr_cb_t double_cb;
@@ -134,23 +259,36 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
return BPF_MODE(meta->insn.code);
}
+static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
+{
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
+}
+
+static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
+{
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
+}
+
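For instance, both of these opcodes count as loads to the helpers above,
since only their BPF_SIZE bits differ and the mask discards those:

	BPF_LDX | BPF_MEM | BPF_W  == 0x61
	BPF_LDX | BPF_MEM | BPF_DW == 0x79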
/**
* struct nfp_prog - nfp BPF program
+ * @bpf: backpointer to the bpf app priv structure
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
- * @start_off: address of the first instruction in the memory
+ * @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
- * @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
+ * @adjust_head_location: if the program has a single adjust_head call - its insn no.
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
+ struct nfp_app_bpf *bpf;
+
u64 *prog;
unsigned int prog_len;
unsigned int __prog_alloc_len;
@@ -159,15 +297,15 @@ struct nfp_prog {
enum bpf_prog_type type;
- unsigned int start_off;
+ unsigned int last_bpf_off;
unsigned int tgt_out;
unsigned int tgt_abort;
- unsigned int tgt_done;
unsigned int n_translated;
int error;
unsigned int stack_depth;
+ unsigned int adjust_head_location;
struct list_head insns;
};
@@ -175,26 +313,49 @@ struct nfp_prog {
/**
* struct nfp_bpf_vnic - per-vNIC BPF priv structure
* @tc_prog: currently loaded cls_bpf program
+ * @start_off: address of the first instruction in the memory
+ * @tgt_done: jump target to get the next packet
*/
struct nfp_bpf_vnic {
struct bpf_prog *tc_prog;
+ unsigned int start_off;
+ unsigned int tgt_done;
};
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
+bool nfp_bpf_supported_opcode(u8 code);
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
struct netdev_bpf;
struct nfp_app;
struct nfp_net;
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
- bool old_prog);
-
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
+ bool old_prog, struct netlink_ext_ack *extack);
+
+struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int insn_idx, unsigned int n_insns);
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
+void
+nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key);
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags);
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value);
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key);
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index bc879aeb62d4..0a7732385469 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -36,18 +36,23 @@
* Netronome network device driver: TC offload functions for PF and VF
*/
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include "main.h"
+#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
@@ -55,11 +60,10 @@ static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
+ struct nfp_insn_meta *meta;
unsigned int i;
for (i = 0; i < cnt; i++) {
- struct nfp_insn_meta *meta;
-
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
return -ENOMEM;
@@ -70,6 +74,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns);
}
+ nfp_bpf_jit_prepare(nfp_prog, cnt);
+
return 0;
}
@@ -84,8 +90,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static int
+nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog;
@@ -98,6 +105,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
+ nfp_prog->bpf = app->priv;
ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
if (ret)
@@ -114,12 +122,12 @@ err_free:
return ret;
}
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
+ int err;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) {
@@ -127,50 +135,179 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
-
- nfp_prog->stack_depth = prog->aux->stack_depth;
- nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
- nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+ nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
- nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+ nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog)
return -ENOMEM;
- return nfp_bpf_jit(nfp_prog);
+ err = nfp_bpf_jit(nfp_prog);
+ if (err)
+ return err;
+
+ prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
+ prog->aux->offload->jited_image = nfp_prog->prog;
+
+ return 0;
}
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- kfree(nfp_prog->prog);
+ kvfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
return 0;
}
-static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
+static int
+nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ if (!key)
+ return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
+ return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
+}
+
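A usage sketch (hypothetical caller, not part of this patch): the NULL key
convention mirrors regular BPF syscall semantics, so a full walk of an
offloaded map looks like:

	err = nfp_bpf_map_get_next_key(offmap, NULL, key);
	while (!err) {
		/* ... consume key ... */
		err = nfp_bpf_map_get_next_key(offmap, key, key);
	}

where key points to a buffer of offmap->map.key_size bytes and an error
(typically -ENOENT once the map is exhausted) ends the walk.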
+static int
+nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+ return nfp_bpf_ctrl_del_entry(offmap, key);
+}
+
+static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
+ .map_get_next_key = nfp_bpf_map_get_next_key,
+ .map_lookup_elem = nfp_bpf_ctrl_lookup_entry,
+ .map_update_elem = nfp_bpf_ctrl_update_entry,
+ .map_delete_elem = nfp_bpf_map_delete_elem,
+};
+
+static int
+nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map;
+ long long int res;
+
+ if (!bpf->maps.types)
+ return -EOPNOTSUPP;
+
+ if (offmap->map.map_flags ||
+ offmap->map.numa_node != NUMA_NO_NODE) {
+ pr_info("map flags are not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
+ pr_info("map type not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if (bpf->maps.max_maps == bpf->maps_in_use) {
+ pr_info("too many maps for a device\n");
+ return -ENOMEM;
+ }
+ if (bpf->maps.max_elems - bpf->map_elems_in_use <
+ offmap->map.max_entries) {
+ pr_info("map with too many elements: %u, left: %u\n",
+ offmap->map.max_entries,
+ bpf->maps.max_elems - bpf->map_elems_in_use);
+ return -ENOMEM;
+ }
+ if (offmap->map.key_size > bpf->maps.max_key_sz ||
+ offmap->map.value_size > bpf->maps.max_val_sz ||
+ round_up(offmap->map.key_size, 8) +
+ round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
+ pr_info("elements don't fit in device constraints\n");
+ return -ENOMEM;
+ }
+
+ nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
+ if (!nfp_map)
+ return -ENOMEM;
+
+ offmap->dev_priv = nfp_map;
+ nfp_map->offmap = offmap;
+ nfp_map->bpf = bpf;
+
+ res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
+ if (res < 0) {
+ kfree(nfp_map);
+ return res;
+ }
+
+ nfp_map->tid = res;
+ offmap->dev_ops = &nfp_bpf_map_ops;
+ bpf->maps_in_use++;
+ bpf->map_elems_in_use += offmap->map.max_entries;
+ list_add_tail(&nfp_map->l, &bpf->map_list);
+
+ return 0;
+}
+
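A worked example of the element-size check above, with illustrative numbers:
a hash map with key_size = 4 and value_size = 12 consumes
round_up(4, 8) + round_up(12, 8) = 8 + 16 = 24 bytes per entry on the
datapath, so it is accepted only if the FW advertised max_elem_sz >= 24,
on top of the individual max_key_sz/max_val_sz limits.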
+static int
+nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+
+ nfp_bpf_ctrl_free_map(bpf, nfp_map);
+ list_del_init(&nfp_map->l);
+ bpf->map_elems_in_use -= offmap->map.max_entries;
+ bpf->maps_in_use--;
+ kfree(nfp_map);
+
+ return 0;
+}
+
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ return nfp_bpf_verifier_prep(app, nn, bpf);
+ case BPF_OFFLOAD_TRANSLATE:
+ return nfp_bpf_translate(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_DESTROY:
+ return nfp_bpf_destroy(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ return nfp_bpf_map_alloc(app->priv, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ return nfp_bpf_map_free(app->priv, bpf->offmap);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu;
dma_addr_t dma_addr;
+ void *img;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
- nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+ NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
return -EOPNOTSUPP;
}
- dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+ img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
+ if (IS_ERR(img))
+ return PTR_ERR(img);
+
+ dma_addr = dma_map_single(nn->dp.dev, img,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
- if (dma_mapping_error(nn->dp.dev, dma_addr))
+ if (dma_mapping_error(nn->dp.dev, dma_addr)) {
+ kfree(img);
return -ENOMEM;
+ }
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -178,15 +315,18 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
/* Load up the JITed code */
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
if (err)
- nn_err(nn, "FW command error while loading BPF: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW command error while loading BPF");
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
+ kfree(img);
return err;
}
-static void nfp_net_bpf_start(struct nfp_net *nn)
+static void
+nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
int err;
@@ -195,7 +335,8 @@ static void nfp_net_bpf_start(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
- nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW command error while enabling BPF");
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
@@ -210,12 +351,12 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
}
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
- bool old_prog)
+ bool old_prog, struct netlink_ext_ack *extack)
{
int err;
if (prog) {
- struct bpf_dev_offload *offload = prog->aux->offload;
+ struct bpf_prog_offload *offload = prog->aux->offload;
if (!offload)
return -EINVAL;
@@ -228,7 +369,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
if (!(cap & NFP_NET_BPF_CAP_RELO)) {
- nn_err(nn, "FW does not support live reload\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW does not support live reload");
return -EBUSY;
}
}
@@ -240,12 +382,12 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
- err = nfp_net_bpf_load(nn, prog);
+ err = nfp_net_bpf_load(nn, prog, extack);
if (err)
return err;
if (!old_prog)
- nfp_net_bpf_start(nn);
+ nfp_net_bpf_start(nn, extack);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 8d43491ddd6b..479f602887e9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,16 +31,18 @@
* SOFTWARE.
*/
-#define pr_fmt(fmt) "NFP net bpf: " fmt
-
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include "fw.h"
#include "main.h"
-static struct nfp_insn_meta *
+#define pr_vlog(env, fmt, ...) \
+ bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
+
+struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
{
@@ -68,6 +70,114 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return meta;
}
+static void
+nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+ struct nfp_insn_meta *meta,
+ const struct bpf_reg_state *reg2)
+{
+ unsigned int location = UINT_MAX;
+ int imm;
+
+ /* The datapath can usually guarantee how much adjust head
+ * can be done without the need for any checks. Optimize the simple
+ * case where there is only one adjust head by a constant.
+ */
+ if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
+ goto exit_set_location;
+ imm = reg2->var_off.value;
+ /* The translator will skip all checks, so we need to guarantee min pkt len */
+ if (imm > ETH_ZLEN - ETH_HLEN)
+ goto exit_set_location;
+ if (imm > (int)bpf->adjust_head.guaranteed_add ||
+ imm < -bpf->adjust_head.guaranteed_sub)
+ goto exit_set_location;
+
+ if (nfp_prog->adjust_head_location) {
+ /* Only one call per program allowed */
+ if (nfp_prog->adjust_head_location != meta->n)
+ goto exit_set_location;
+
+ if (meta->arg2.var_off.value != imm)
+ goto exit_set_location;
+ }
+
+ location = meta->n;
+exit_set_location:
+ nfp_prog->adjust_head_location = location;
+}
+
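For example (illustrative values): for a program whose only adjust head call
is bpf_xdp_adjust_head(ctx, -14), e.g. to push an outer Ethernet header, the
verifier records the insn number as long as
14 <= bpf->adjust_head.guaranteed_sub, letting the translator drop the
runtime bounds checks for that call. A second call site, or a non-constant
argument, leaves adjust_head_location at UINT_MAX so the checks stay in.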
+static int
+nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
+ struct nfp_insn_meta *meta)
+{
+ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+ const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
+ struct nfp_app_bpf *bpf = nfp_prog->bpf;
+ u32 func_id = meta->insn.imm;
+ s64 off, old_off;
+
+ switch (func_id) {
+ case BPF_FUNC_xdp_adjust_head:
+ if (!bpf->adjust_head.off_max) {
+ pr_vlog(env, "adjust_head not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
+ pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
+ return -EOPNOTSUPP;
+ }
+
+ nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
+ break;
+
+ case BPF_FUNC_map_lookup_elem:
+ if (!bpf->helpers.map_lookup) {
+ pr_vlog(env, "map_lookup: not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ if (reg2->type != PTR_TO_STACK) {
+ pr_vlog(env,
+ "map_lookup: unsupported key ptr type %d\n",
+ reg2->type);
+ return -EOPNOTSUPP;
+ }
+ if (!tnum_is_const(reg2->var_off)) {
+ pr_vlog(env, "map_lookup: variable key pointer\n");
+ return -EOPNOTSUPP;
+ }
+
+ off = reg2->var_off.value + reg2->off;
+ if (-off % 4) {
+ pr_vlog(env,
+ "map_lookup: unaligned stack pointer %lld\n",
+ -off);
+ return -EOPNOTSUPP;
+ }
+
+ /* The rest of the checks apply only if we re-parse the same insn */
+ if (!meta->func_id)
+ break;
+
+ old_off = meta->arg2.var_off.value + meta->arg2.off;
+ meta->arg2_var_off |= off != old_off;
+
+ if (meta->arg1.map_ptr != reg1->map_ptr) {
+ pr_vlog(env, "map_lookup: called for different map\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ pr_vlog(env, "unsupported function id: %d\n", func_id);
+ return -EOPNOTSUPP;
+ }
+
+ meta->func_id = func_id;
+ meta->arg1 = *reg1;
+ meta->arg2 = *reg2;
+
+ return 0;
+}
+
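To illustrate the key alignment rule above: a map_lookup key kept at stack
offset -8 passes (-off % 4 == 0), while one at offset -6 is rejected, since
the datapath addresses the stack in 4-byte words.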
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
struct bpf_verifier_env *env)
@@ -82,7 +192,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
- pr_info("unsupported exit state: %d, var_off: %s\n",
+ pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
reg0->type, tn_buf);
return -EINVAL;
}
@@ -92,7 +202,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) {
- pr_info("unsupported exit state: %d, imm: %llx\n",
+ pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
reg0->type, imm);
return -EINVAL;
}
@@ -103,12 +213,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
struct nfp_insn_meta *meta,
- const struct bpf_reg_state *reg)
+ const struct bpf_reg_state *reg,
+ struct bpf_verifier_env *env)
{
s32 old_off, new_off;
if (!tnum_is_const(reg->var_off)) {
- pr_info("variable ptr stack access\n");
+ pr_vlog(env, "variable ptr stack access\n");
return -EINVAL;
}
@@ -126,7 +237,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
if (old_off % 4 == new_off % 4)
return 0;
- pr_info("stack access changed location was:%d is:%d\n",
+ pr_vlog(env, "stack access changed location was:%d is:%d\n",
old_off, new_off);
return -EINVAL;
}
@@ -140,19 +251,27 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (reg->type != PTR_TO_CTX &&
reg->type != PTR_TO_STACK &&
+ reg->type != PTR_TO_MAP_VALUE &&
reg->type != PTR_TO_PACKET) {
- pr_info("unsupported ptr type: %d\n", reg->type);
+ pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
return -EINVAL;
}
if (reg->type == PTR_TO_STACK) {
- err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+ err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
if (err)
return err;
}
+ if (reg->type == PTR_TO_MAP_VALUE) {
+ if (is_mbpf_store(meta)) {
+ pr_vlog(env, "map writes not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
- pr_info("ptr type changed for instruction %d -> %d\n",
+ pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
meta->ptr.type, reg->type);
return -EINVAL;
}
@@ -171,25 +290,33 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
nfp_prog->verifier_meta = meta;
+ if (!nfp_bpf_supported_opcode(meta->insn.code)) {
+ pr_vlog(env, "instruction %#02x not supported\n",
+ meta->insn.code);
+ return -EINVAL;
+ }
+
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
- pr_err("program uses extended registers - jit hardening?\n");
+ pr_vlog(env, "program uses extended registers - jit hardening?\n");
return -EINVAL;
}
+ if (meta->insn.code == (BPF_JMP | BPF_CALL))
+ return nfp_bpf_check_call(nfp_prog, env, meta);
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
return nfp_bpf_check_exit(nfp_prog, env);
- if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+ if (is_mbpf_load(meta))
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
- if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+ if (is_mbpf_store(meta))
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.dst_reg);
return 0;
}
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c1c595f8bb87..b3567a596fc1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
return false;
}
@@ -93,13 +96,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
size_t act_size = sizeof(struct nfp_fl_output);
struct net_device *out_dev;
u16 tmp_flags;
- int ifindex;
output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- ifindex = tcf_mirred_ifindex(action);
- out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+ out_dev = tcf_mirred_dev(action);
if (!out_dev)
return -EOPNOTSUPP;
@@ -138,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
return 0;
}
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+ const struct tc_action *action)
{
struct ip_tunnel_info *tun = tcf_tunnel_info(action);
-
- return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+ struct nfp_flower_priv *priv = app->priv;
+
+ switch (tun->key.tp_dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ return NFP_FL_TUNNEL_VXLAN;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+ return NFP_FL_TUNNEL_GENEVE;
+ /* FALLTHROUGH */
+ default:
+ return NFP_FL_TUNNEL_NONE;
+ }
}
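For example, a "tunnel_key set" action with dst_port 4789 always maps to
NFP_FL_TUNNEL_VXLAN, while dst_port 6081 yields NFP_FL_TUNNEL_GENEVE only
when the firmware advertised NFP_FL_FEATS_GENEVE; any other port falls
through to NFP_FL_TUNNEL_NONE and the caller rejects the flow with
-EOPNOTSUPP.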
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -167,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
- const struct tc_action *action,
- struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type)
{
- struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
- size_t act_size = sizeof(struct nfp_fl_set_vxlan);
- u32 tmp_set_vxlan_type_index = 0;
+ size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
- if (vxlan->options_len) {
- /* Do not support options e.g. vxlan gpe. */
+ if (ip_tun->options_len)
return -EOPNOTSUPP;
- }
- set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
- set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
- tmp_set_vxlan_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ tmp_set_ip_tun_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
- set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
- set_vxlan->tun_id = vxlan->key.tun_id;
- set_vxlan->tun_flags = vxlan->key.tun_flags;
- set_vxlan->ipv4_ttl = vxlan->key.ttl;
- set_vxlan->ipv4_tos = vxlan->key.tos;
+ set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+ set_tun->tun_id = ip_tun->key.tun_id;
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
return 0;
}
@@ -435,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
- struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -484,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
- } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ } else if (is_tcf_tunnel_set(a)) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ if (*tun_type == NFP_FL_TUNNEL_NONE)
+ return -EOPNOTSUPP;
+
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
- *tun_type = NFP_FL_TUNNEL_VXLAN;
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ set_tun = (void *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
if (err)
return err;
-
- *a_len += sizeof(struct nfp_fl_set_vxlan);
+ *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
} else if (is_tcf_tunnel_release(a)) {
/* Tunnel decap is handled by default so accept action. */
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index e98bb9cdb6a3..baaea6f1a9d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
return 0;
}
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
+{
+ struct nfp_flower_cmsg_portreify *msg;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+ msg->reserved = 0;
+ msg->info = cpu_to_be16(exists);
+
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
@@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
}
static void
+nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_portreify *msg;
+ bool exists;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ rcu_read_unlock();
+ if (!exists) {
+ nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+ be32_to_cpu(msg->portnum));
+ return;
+ }
+
+ atomic_inc(&priv->reify_replies);
+ wake_up_interruptible(&priv->reify_wait_queue);
+}
+
+static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_cmsg_hdr *cmsg_hdr;
@@ -168,20 +211,14 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
- nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
- cmsg_hdr->version);
- goto out;
- }
-
type = cmsg_hdr->type;
switch (type) {
+ case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
+ nfp_flower_cmsg_portreify_rx(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
- case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
- nfp_flower_rx_flow_stats(app, skb);
- break;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
@@ -217,7 +254,23 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work)
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_hdr *cmsg_hdr;
+
+ cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- skb_queue_tail(&priv->cmsg_skbs, skb);
- schedule_work(&priv->cmsg_work);
+ if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
+ nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
+ cmsg_hdr->version);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
+ /* We need to deal with stats updates from HW asap */
+ nfp_flower_rx_flow_stats(app, skb);
+ dev_consume_skb_any(skb);
+ } else {
+ skb_queue_tail(&priv->cmsg_skbs, skb);
+ schedule_work(&priv->cmsg_work);
+ }
}
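Two effects of the receive path restructuring above: the version check runs
in nfp_flower_cmsg_rx() so malformed messages are dropped before any handler
sees them, and FLOW_STATS messages are consumed inline in the RX path so
stats stay fresh, while all other types still go through cmsg_work, whose
worker context is allowed to sleep.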
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 66070741d55f..adfe474c2cf0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -41,7 +41,7 @@
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
-#define NFP_FLOWER_LAYER_META BIT(0)
+#define NFP_FLOWER_LAYER_EXT_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
#define NFP_FLOWER_LAYER_MAC BIT(2)
#define NFP_FLOWER_LAYER_TP BIT(3)
@@ -50,8 +50,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
-#define NFP_FLOWER_LAYER_ETHER BIT(3)
-#define NFP_FLOWER_LAYER_ARP BIT(4)
+#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
@@ -108,6 +107,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
NFP_FL_TUNNEL_VXLAN = 2,
+ NFP_FL_TUNNEL_GENEVE = 4,
};
struct nfp_fl_act_head {
@@ -165,20 +165,6 @@ struct nfp_fl_pop_vlan {
__be16 reserved;
};
-/* Metadata without L2 (1W/4B)
- * ----------------------------------------------------------------
- * 3 2 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | key_layers | mask_id | reserved |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-struct nfp_flower_meta_one {
- u8 nfp_flow_key_layer;
- u8 mask_id;
- u16 reserved;
-};
-
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -187,16 +173,13 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_vxlan {
+struct nfp_fl_set_ipv4_udp_tun {
struct nfp_fl_act_head head;
__be16 reserved;
- __be64 tun_id;
+ __be64 tun_id __packed;
__be32 tun_type_index;
- __be16 tun_flags;
- u8 ipv4_ttl;
- u8 ipv4_tos;
- __be32 extra[2];
-} __packed;
+ __be32 extra[3];
+};
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
@@ -209,12 +192,24 @@ struct nfp_fl_set_vxlan {
* NOTE: | TCI |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_meta_two {
+struct nfp_flower_meta_tci {
u8 nfp_flow_key_layer;
u8 mask_id;
__be16 tci;
};
+/* Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | nfp_flow_key_layer2 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+ __be32 nfp_flow_key_layer2;
+};
+
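For example, a GENEVE match sets NFP_FLOWER_LAYER_EXT_META in the first
key_layer byte and NFP_FLOWER_LAYER2_GENEVE in nfp_flow_key_layer2, so the
original 8-bit layer bitmap can keep growing without changing the base
metadata word.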
/* Port details (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -313,7 +308,7 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
-/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -322,22 +317,17 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | tun_flags | tos | ttl |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | gpe_flags | Reserved | Next Protocol |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VNI | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_vxlan {
+struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
- __be16 tun_flags;
- u8 tos;
- u8 ttl;
- u8 gpe_flags;
- u8 reserved[2];
- u8 nxt_proto;
+ __be32 reserved[2];
__be32 tun_id;
};
@@ -360,6 +350,7 @@ struct nfp_flower_cmsg_hdr {
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
@@ -396,6 +387,15 @@ struct nfp_flower_cmsg_portmod {
#define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0)
+/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
+struct nfp_flower_cmsg_portreify {
+ __be32 portnum;
+ u16 reserved;
+ __be16 info;
+};
+
+#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
+
enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -454,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
unsigned int nbi, unsigned int nbi_port,
unsigned int phys_port);
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 8fcc90c0d2d3..742d6f1575b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
@@ -98,7 +99,57 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
if (port >= reprs->num_reprs)
return NULL;
- return reprs->reprs[port];
+ return rcu_dereference(reprs->reprs[port]);
+}
+
+static int
+nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
+ bool exists)
+{
+ struct nfp_reprs *reprs;
+ int i, err, count = 0;
+
+ reprs = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+ if (!reprs)
+ return 0;
+
+ for (i = 0; i < reprs->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ err = nfp_flower_cmsg_portreify(repr, exists);
+ if (err)
+ return err;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (!tot_repl)
+ return 0;
+
+ lockdep_assert_held(&app->pf->lock);
+ err = wait_event_interruptible_timeout(priv->reify_wait_queue,
+ atomic_read(replies) >= tot_repl,
+ msecs_to_jiffies(10));
+ if (err <= 0) {
+ nfp_warn(app->cpp, "Not all reprs responded to reify\n");
+ return -EIO;
+ }
+
+ return 0;
}
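The reply accounting follows a simple counter-plus-waitqueue pattern; a
sketch of the three cooperating pieces as used later in this patch (all
names from this patch):

	atomic_set(&priv->reify_replies, 0);
	n = nfp_flower_reprs_reify(app, type, true);
	err = nfp_flower_wait_repr_reify(app, &priv->reify_replies, n);

with nfp_flower_cmsg_portreify_rx() doing the matching atomic_inc() and
wake_up_interruptible() on the RX side.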
static int
@@ -110,7 +161,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
if (err)
return err;
- netif_carrier_on(repr->netdev);
netif_tx_wake_all_queues(repr->netdev);
return 0;
@@ -119,7 +169,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
- netif_carrier_off(repr->netdev);
netif_tx_disable(repr->netdev);
return nfp_flower_cmsg_portmod(repr, false);
@@ -140,6 +189,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
netdev_priv(netdev));
}
+static void
+nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
+ int err;
+
+ atomic_set(replies, 0);
+ err = nfp_flower_cmsg_portreify(repr, false);
+ if (err) {
+ nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
+ return;
+ }
+
+ nfp_flower_wait_repr_reify(app, replies, 1);
+}
+
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -157,10 +224,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
{
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
enum nfp_port_type port_type;
struct nfp_reprs *reprs;
+ int i, err, reify_cnt;
const u8 queue = 0;
- int i, err;
port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
NFP_PORT_VF_PORT;
@@ -170,19 +238,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
return -ENOMEM;
for (i = 0; i < cnt; i++) {
+ struct net_device *repr;
struct nfp_port *port;
u32 port_id;
- reprs->reprs[i] = nfp_repr_alloc(app);
- if (!reprs->reprs[i]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[i], repr);
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
- port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+ port = nfp_port_alloc(app, port_type, repr);
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
@@ -193,11 +263,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
}
- eth_hw_addr_random(reprs->reprs[i]);
+ eth_hw_addr_random(repr);
port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
i, queue);
- err = nfp_repr_init(app, reprs->reprs[i],
+ err = nfp_repr_init(app, repr,
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -206,14 +276,28 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
- reprs->reprs[i]->name);
+ repr->name);
}
nfp_app_reprs_set(app, repr_type, reprs);
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
return err;
}
@@ -233,10 +317,11 @@ static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+ atomic_t *replies = &priv->reify_replies;
struct sk_buff *ctrl_skb;
struct nfp_reprs *reprs;
+ int err, reify_cnt;
unsigned int i;
- int err;
ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
if (!ctrl_skb)
@@ -250,17 +335,18 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
for (i = 0; i < eth_tbl->count; i++) {
unsigned int phys_port = eth_tbl->ports[i].index;
+ struct net_device *repr;
struct nfp_port *port;
u32 cmsg_port_id;
- reprs->reprs[phys_port] = nfp_repr_alloc(app);
- if (!reprs->reprs[phys_port]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
- port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
- reprs->reprs[phys_port]);
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
goto err_reprs_clean;
@@ -271,11 +357,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
goto err_reprs_clean;
}
- SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+ SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
nfp_net_get_mac_addr(app->pf, port);
cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
- err = nfp_repr_init(app, reprs->reprs[phys_port],
+ err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -288,23 +374,37 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
- phys_port, reprs->reprs[phys_port]->name);
+ phys_port, repr->name);
}
nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- /* The MAC_REPR control message should be sent after the MAC
+ /* The REIFY/MAC_REPR control messages should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
* because the firmware may respond with control messages for the
* MAC representors, f.e. to provide the driver with information
* about their state, and without registration the driver will drop
* any such messages.
*/
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
nfp_ctrl_tx(app->ctrl, ctrl_skb);
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
kfree_skb(ctrl_skb);
return err;
@@ -381,7 +481,7 @@ static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
struct nfp_flower_priv *app_priv;
- u64 version;
+ u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -419,11 +519,20 @@ static int nfp_flower_init(struct nfp_app *app)
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs);
INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+ init_waitqueue_head(&app_priv->reify_wait_queue);
err = nfp_flower_metadata_init(app);
if (err)
goto err_free_app_priv;
+ /* Extract the extra features supported by the firmware. */
+ features = nfp_rtsym_read_le(app->pf->rtbl,
+ "_abi_flower_extra_features", &err);
+ if (err)
+ app_priv->flower_ext_feats = 0;
+ else
+ app_priv->flower_ext_feats = features;
+
return 0;
err_free_app_priv:
@@ -456,6 +565,8 @@ static void nfp_flower_stop(struct nfp_app *app)
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
+
+ .ctrl_cap_mask = ~0U,
.ctrl_has_meta = true,
.extra_cap = nfp_flower_extra_cap,
@@ -468,6 +579,7 @@ const struct nfp_app_type app_flower = {
.vnic_clean = nfp_flower_vnic_clean,
.repr_init = nfp_flower_repr_netdev_init,
+ .repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
.repr_open = nfp_flower_repr_netdev_open,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e6b26c5ae6e0..332ff0fdc038 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -34,6 +34,8 @@
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
+#include "cmsg.h"
+
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
#define NFP_FL_MASK_ID_LOCATION 1
#define NFP_FL_VXLAN_PORT 4789
+#define NFP_FL_GENEVE_PORT 6081
+
+/* Extra features bitmap. */
+#define NFP_FL_FEATS_GENEVE BIT(0)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
* @nn: Pointer to vNIC
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
+ * @flower_ext_feats: Bitmap of extra features the HW supports
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
@@ -95,12 +102,16 @@ struct nfp_fl_stats_id {
* @nfp_mac_off_count: Number of MACs in address list
* @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
+ * @reify_replies: atomically stores the number of replies received
+ * from firmware for repr reify
+ * @reify_wait_queue: wait queue for repr reify response counting
*/
struct nfp_flower_priv {
struct nfp_app *app;
struct nfp_net *nn;
u32 mask_id_seed;
u64 flower_version;
+ u64 flower_ext_feats;
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -119,6 +130,8 @@ struct nfp_flower_priv {
int nfp_mac_off_count;
struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
+ atomic_t reify_replies;
+ wait_queue_head_t reify_wait_queue;
};
struct nfp_fl_key_ls {
@@ -172,7 +185,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type);
int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 60614d4f0e22..37c2ecae2a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -38,7 +38,7 @@
#include "main.h"
static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
@@ -46,7 +46,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
- memset(frame, 0, sizeof(struct nfp_flower_meta_two));
+ memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
@@ -68,11 +68,9 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
}
static void
-nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
- frame->nfp_flow_key_layer = key_type;
- frame->mask_id = 0;
- frame->reserved = 0;
+ frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
static int
@@ -224,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
}
static void
-nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version, __be32 *tun_dst)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
- /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
- memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -248,80 +245,68 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- vxlan_ips =
+ tun_ips =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
target);
- frame->ip_src = vxlan_ips->src;
- frame->ip_dst = vxlan_ips->dst;
- *tun_dst = vxlan_ips->dst;
+ frame->ip_src = tun_ips->src;
+ frame->ip_dst = tun_ips->dst;
}
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type)
{
- enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- __be32 tun_dst, tun_dst_mask = 0;
struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
- tun_type = NFP_FL_TUNNEL_VXLAN;
-
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
- if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
- flow, key_ls->key_layer, false);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
- flow, key_ls->key_layer, true);
- ext += sizeof(struct nfp_flower_meta_two);
- msk += sizeof(struct nfp_flower_meta_two);
-
- /* Populate Exact Port data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
- if (err)
- return err;
-
- /* Populate Mask Port Data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
- if (err)
- return err;
-
- ext += sizeof(struct nfp_flower_in_port);
- msk += sizeof(struct nfp_flower_in_port);
- } else {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
- key_ls->key_layer);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
- key_ls->key_layer);
- ext += sizeof(struct nfp_flower_meta_one);
- msk += sizeof(struct nfp_flower_meta_one);
- }
- if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
- /* Additional Metadata Fields.
- * Currently unsupported.
- */
- return -EOPNOTSUPP;
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
+ flow, key_ls->key_layer, false);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
+ flow, key_ls->key_layer, true);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ msk += sizeof(struct nfp_flower_meta_tci);
+
+ /* Populate Extended Metadata if Required. */
+ if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
+ key_ls->key_layer_two);
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+ key_ls->key_layer_two);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ msk += sizeof(struct nfp_flower_ext_meta);
}
+ /* Populate Exact Port data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+ nfp_repr_get_port_id(netdev),
+ false, tun_type);
+ if (err)
+ return err;
+
+ /* Populate Mask Port Data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ nfp_repr_get_port_id(netdev),
+ true, tun_type);
+ if (err)
+ return err;
+
+ ext += sizeof(struct nfp_flower_in_port);
+ msk += sizeof(struct nfp_flower_in_port);
+
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
/* Populate Exact MAC Data. */
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
@@ -366,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ __be32 tun_dst;
+
/* Populate Exact VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
- flow, false, &tun_dst);
+ nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
/* Populate Mask VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
- flow, true, &tun_dst_mask);
- ext += sizeof(struct nfp_flower_vxlan);
- msk += sizeof(struct nfp_flower_vxlan);
+ nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+ tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
if (nfp_netdev_is_nfp_repr(netdev)) {
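The rework above turns the match-key compiler into a straight-line pass: the meta/TCI layer is always emitted, the extended-metadata word only when NFP_FLOWER_LAYER_EXT_META is set, and every layer advances the ext/msk cursors by its struct size. Below is a minimal userspace sketch of that cursor pattern; the struct layouts and the layer flag value are illustrative stand-ins, not the driver's real definitions.

/* Each layer struct is written at the current cursor and the cursor
 * advances by that struct's size, so optional layers simply change the
 * final key length.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct meta_tci { uint8_t key_layer; uint8_t mask_id; uint16_t tci; };
struct ext_meta { uint32_t key_layer2; };
struct in_port  { uint32_t port_id; };

#define LAYER_EXT_META 0x1	/* hypothetical flag value */

static size_t compile_key(uint8_t *buf, uint8_t key_layer, uint32_t key_layer2)
{
	uint8_t *p = buf;

	struct meta_tci meta = { .key_layer = key_layer, .mask_id = 0xff };
	memcpy(p, &meta, sizeof(meta));
	p += sizeof(meta);

	if (key_layer & LAYER_EXT_META) {	/* optional layer */
		struct ext_meta ext = { .key_layer2 = key_layer2 };
		memcpy(p, &ext, sizeof(ext));
		p += sizeof(ext);
	}

	struct in_port port = { .port_id = 42 };
	memcpy(p, &port, sizeof(port));
	p += sizeof(port);

	return p - buf;				/* total key size */
}

int main(void)
{
	uint8_t key[64];

	printf("base key: %zu bytes\n", compile_key(key, 0, 0));
	printf("with ext meta: %zu bytes\n", compile_key(key, LAYER_EXT_META, 0x8));
	return 0;
}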
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 553f94f55dce..08c4c6dc5f7f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
-nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress)
+ bool egress,
+ enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
+ struct nfp_flower_priv *priv = app->priv;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -150,10 +153,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
return -EOPNOTSUPP;
key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
+ key_layer = NFP_FLOWER_LAYER_PORT;
+ key_size = sizeof(struct nfp_flower_meta_tci) +
+ sizeof(struct nfp_flower_in_port);
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+ dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ key_layer |= NFP_FLOWER_LAYER_MAC;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -192,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
FLOW_DISSECTOR_KEY_ENC_PORTS,
flow->key);
- if (mask_enc_ports->dst != cpu_to_be16(~0) ||
- enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
- key_layer |= NFP_FLOWER_LAYER_VXLAN;
- key_size += sizeof(struct nfp_flower_vxlan);
+ switch (enc_ports->dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+ return -EOPNOTSUPP;
+ *tun_type = NFP_FL_TUNNEL_GENEVE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
} else if (egress) {
/* Reject non tunnel matches offloaded to egress repr. */
return -EOPNOTSUPP;
@@ -325,6 +348,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
@@ -334,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
+ err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ &tun_type);
if (err)
goto err_free_key_ls;
@@ -344,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+ err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
+ tun_type);
if (err)
goto err_destroy_flow;
@@ -457,8 +483,7 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flower, bool egress)
{
- if (!eth_proto_is_802_3(flower->common.protocol) ||
- flower->common.chain_index)
+ if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
@@ -478,7 +503,7 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -495,7 +520,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
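The key-layer calculation now dispatches on the tunnel's UDP destination port instead of accepting VXLAN only, and gates GENEVE on a firmware feature flag. A hedged sketch of that dispatch follows; 4789 and 6081 are the IANA VXLAN and GENEVE ports that NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT are assumed to hold, and the feature check is modelled as a plain bool.

#include <stdbool.h>
#include <stdio.h>

enum tun_type { TUNNEL_NONE, TUNNEL_VXLAN, TUNNEL_GENEVE };

static int classify_tunnel(unsigned short dst_port, bool fw_has_geneve,
			   enum tun_type *type)
{
	switch (dst_port) {
	case 4789:				/* assumed VXLAN port */
		*type = TUNNEL_VXLAN;
		return 0;
	case 6081:				/* assumed GENEVE port */
		if (!fw_has_geneve)
			return -1;		/* stands in for -EOPNOTSUPP */
		*type = TUNNEL_GENEVE;
		return 0;
	default:
		return -1;			/* unknown tunnel port, reject */
	}
}

int main(void)
{
	enum tun_type t = TUNNEL_NONE;
	int ret = classify_tunnel(4789, false, &t);

	printf("4789: ret=%d type=%d\n", ret, t);
	printf("6081 without FW support: ret=%d\n",
	       classify_tunnel(6081, false, &t));
	return 0;
}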
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 955a9f44d244..6aedef0ad433 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -32,6 +32,8 @@
*/
#include <linux/bug.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -99,13 +101,19 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
}
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
+{
+ return rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+}
+
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs)
{
struct nfp_reprs *old;
- old = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ old = nfp_reprs_get_locked(app, type);
rcu_assign_pointer(app->reprs[type], reprs);
return old;
@@ -116,7 +124,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
struct nfp_app *app;
if (id >= ARRAY_SIZE(apps) || !apps[id]) {
- nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
+ nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 0e5e0305ad1c..437964afa8ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -43,6 +43,7 @@
struct bpf_prog;
struct net_device;
struct netdev_bpf;
+struct netlink_ext_ack;
struct pci_dev;
struct sk_buff;
struct sk_buff;
@@ -66,6 +67,9 @@ extern const struct nfp_app_type app_flower;
* struct nfp_app_type - application definition
* @id: application ID
* @name: application name
+ * @ctrl_cap_mask: ctrl vNIC capability mask, allows disabling features like
+ * IRQMOD, which are on by default but counter-productive for
+ * control messages which are often latency-sensitive
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
*
* Callbacks
@@ -77,18 +81,20 @@ extern const struct nfp_app_type app_flower;
* @vnic_init: vNIC netdev was registered
* @vnic_clean: vNIC netdev about to be unregistered
* @repr_init: representor about to be registered
+ * @repr_preclean: representor about to be unregistered, executed before app
+ * reference to it is removed
* @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
+ * @change_mtu: MTU change on a netdev has been requested (veto-only, change
+ * is not guaranteed to be committed)
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
+ * @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
- * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
- * @bpf_translate: translate call for dev-specific BPF programs
- * @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@@ -98,6 +104,7 @@ struct nfp_app_type {
enum nfp_app_id id;
const char *name;
+ u32 ctrl_cap_mask;
bool ctrl_has_meta;
int (*init)(struct nfp_app *app);
@@ -112,11 +119,15 @@ struct nfp_app_type {
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev);
void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
+ int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
+ int new_mtu);
+
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
@@ -125,14 +136,11 @@ struct nfp_app_type {
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+ int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *xdp);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
- int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
- int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
- int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
+ struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@@ -163,6 +171,7 @@ struct nfp_app {
void *priv;
};
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
static inline int nfp_app_init(struct nfp_app *app)
@@ -226,12 +235,27 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
}
static inline void
+nfp_app_repr_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+ if (app->type->repr_preclean)
+ app->type->repr_preclean(app, netdev);
+}
+
+static inline void
nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
{
if (app->type->repr_clean)
app->type->repr_clean(app, netdev);
}
+static inline int
+nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ if (!app || !app->type->change_mtu)
+ return 0;
+ return app->type->change_mtu(app, netdev, new_mtu);
+}
+
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
@@ -293,39 +317,29 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
return app->type->setup_tc(app, netdev, type, type_data);
}
-static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
- if (!app || !app->type->xdp_offload)
- return -EOPNOTSUPP;
- return app->type->xdp_offload(app, nn, prog);
+ if (!app || !app->type->bpf)
+ return -EINVAL;
+ return app->type->bpf(app, nn, bpf);
}
-static inline int
-nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- if (!app || !app->type->bpf_verifier_prep)
+ if (!app || !app->type->xdp_offload)
return -EOPNOTSUPP;
- return app->type->bpf_verifier_prep(app, nn, bpf);
+ return app->type->xdp_offload(app, nn, prog, extack);
}
-static inline int
-nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
- if (!app || !app->type->bpf_translate)
- return -EOPNOTSUPP;
- return app->type->bpf_translate(app, nn, prog);
-}
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
-static inline int
-nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
-{
- if (!app || !app->type->bpf_destroy)
- return -EOPNOTSUPP;
- return app->type->bpf_destroy(app, nn, prog);
+ return __nfp_ctrl_tx(app->ctrl, skb);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
@@ -378,6 +392,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type);
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs);
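Most nfp_app ops are optional, and the static inline wrappers above encode per-op defaults: a missing change_mtu means "no veto" (return 0), while a missing bpf op is an error. A standalone sketch of that optional-callback convention, using plain-C stand-ins for the kernel structures:

#include <stdio.h>

struct app_type {
	int (*change_mtu)(int new_mtu);	/* veto-only hook, may be NULL */
};

struct app {
	const struct app_type *type;
};

static int app_change_mtu(struct app *app, int new_mtu)
{
	if (!app || !app->type->change_mtu)
		return 0;			/* no handler: allow */
	return app->type->change_mtu(new_mtu);
}

static int veto_large(int new_mtu)
{
	return new_mtu > 9000 ? -1 : 0;		/* -1 stands in for an errno */
}

int main(void)
{
	struct app_type with = { .change_mtu = veto_large };
	struct app_type without = { 0 };
	struct app a = { &with }, b = { &without };

	printf("veto 9600: %d\n", app_change_mtu(&a, 9600));	/* -1 */
	printf("no hook:   %d\n", app_change_mtu(&b, 9600));	/*  0 */
	return 0;
}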
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 830f6de25f47..3f6952b66a49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -41,6 +41,7 @@
const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 },
+ [CMD_TGT_WRITE32_SWAP] = { 0x02, 0x5f },
[CMD_TGT_READ8] = { 0x01, 0x43 },
[CMD_TGT_READ32] = { 0x00, 0x5c },
[CMD_TGT_READ32_LE] = { 0x01, 0x5c },
@@ -49,6 +50,94 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
};
+static bool unreg_is_imm(u16 reg)
+{
+ return (reg & UR_REG_IMM) == UR_REG_IMM;
+}
+
+u16 br_get_offset(u64 instr)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
+ addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
+
+ return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
+ addr_lo;
+}
+
+void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+void br_add_offset(u64 *instr, u16 offset)
+{
+ u16 addr;
+
+ addr = br_get_offset(*instr);
+ br_set_offset(instr, addr + offset);
+}
+
+static bool immed_can_modify(u64 instr)
+{
+ if (FIELD_GET(OP_IMMED_INV, instr) ||
+ FIELD_GET(OP_IMMED_SHIFT, instr) ||
+ FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) {
+ pr_err("Can't decode/encode immed!\n");
+ return false;
+ }
+ return true;
+}
+
+u16 immed_get_value(u64 instr)
+{
+ u16 reg;
+
+ if (!immed_can_modify(instr))
+ return 0;
+
+ reg = FIELD_GET(OP_IMMED_A_SRC, instr);
+ if (!unreg_is_imm(reg))
+ reg = FIELD_GET(OP_IMMED_B_SRC, instr);
+
+ return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr);
+}
+
+void immed_set_value(u64 *instr, u16 immed)
+{
+ if (!immed_can_modify(*instr))
+ return;
+
+ if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) {
+ *instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff);
+ } else {
+ *instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff);
+ }
+
+ *instr &= ~OP_IMMED_IMM;
+ *instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8);
+}
+
+void immed_add_value(u64 *instr, u16 offset)
+{
+ u16 val;
+
+ if (!immed_can_modify(*instr))
+ return;
+
+ val = immed_get_value(*instr);
+ immed_set_value(instr, val + offset);
+}
+
static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
{
bool lm_id, lm_dec = false;
@@ -120,7 +209,8 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
reg->dst = nfp_swreg_to_unreg(dst, true);
/* Decode source operands */
- if (swreg_type(lreg) == swreg_type(rreg))
+ if (swreg_type(lreg) == swreg_type(rreg) &&
+ swreg_type(lreg) != NN_REG_NONE)
return -EFAULT;
if (swreg_type(lreg) == NN_REG_GPR_B ||
@@ -200,7 +290,8 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
/* Decode source operands */
- if (swreg_type(lreg) == swreg_type(rreg))
+ if (swreg_type(lreg) == swreg_type(rreg) &&
+ swreg_type(lreg) != NN_REG_NONE)
return -EFAULT;
if (swreg_type(lreg) == NN_REG_GPR_B ||
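br_get_offset()/br_set_offset() above split a branch target across a wide low field and a separate high bit inside the 64-bit instruction word. A runnable model of that arithmetic follows; the masks are illustrative, not the real OP_BR_ADDR_* encodings, and FIELD_GET()/FIELD_PREP() are approximated with a count-trailing-zeros shift.

#include <stdint.h>
#include <stdio.h>

#define ADDR_LO 0x007ffc00000ULL	/* illustrative 13-bit low field */
#define ADDR_HI 0x10000000000ULL	/* illustrative 1-bit high field */

#define SHF(m)		__builtin_ctzll(m)
#define GET(m, v)	(((v) & (m)) >> SHF(m))
#define PREP(m, v)	(((uint64_t)(v) << SHF(m)) & (m))

static uint16_t get_offset(uint64_t instr)
{
	uint16_t lo = GET(ADDR_LO, instr), hi = GET(ADDR_HI, instr);

	/* high bit counts for one full span of the low field */
	return (hi * ((ADDR_LO >> SHF(ADDR_LO)) + 1)) | lo;
}

static void set_offset(uint64_t *instr, uint16_t offset)
{
	uint16_t lo = offset & (ADDR_LO >> SHF(ADDR_LO));
	uint16_t hi = offset != lo;	/* any bit above the low field */

	*instr &= ~(ADDR_HI | ADDR_LO);
	*instr |= PREP(ADDR_HI, hi) | PREP(ADDR_LO, lo);
}

int main(void)
{
	uint64_t instr = 0;

	set_offset(&instr, 0x2345);
	printf("round trip: 0x%x\n", get_offset(instr));	/* 0x2345 */
	return 0;
}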
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 74d0c11ab2f9..5f9291db98e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -77,9 +77,11 @@
enum br_mask {
BR_BEQ = 0x00,
BR_BNE = 0x01,
+ BR_BMI = 0x02,
BR_BHS = 0x04,
BR_BLO = 0x05,
BR_BGE = 0x08,
+ BR_BLT = 0x09,
BR_UNC = 0x18,
};
@@ -92,6 +94,10 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
+u16 br_get_offset(u64 instr);
+void br_set_offset(u64 *instr, u16 offset);
+void br_add_offset(u64 *instr, u16 offset);
+
#define OP_BBYTE_BASE 0x0c800000000ULL
#define OP_BB_A_SRC 0x000000000ffULL
#define OP_BB_BYTE 0x00000000300ULL
@@ -132,6 +138,10 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
+u16 immed_get_value(u64 instr);
+void immed_set_value(u64 *instr, u16 immed);
+void immed_add_value(u64 *instr, u16 offset);
+
#define OP_SHF_BASE 0x08000000000ULL
#define OP_SHF_A_SRC 0x000000000ffULL
#define OP_SHF_SC 0x00000000300ULL
@@ -175,6 +185,7 @@ enum alu_op {
ALU_OP_NONE = 0x00,
ALU_OP_ADD = 0x01,
ALU_OP_NOT = 0x04,
+ ALU_OP_ADD_2B = 0x05,
ALU_OP_AND = 0x08,
ALU_OP_SUB_C = 0x0d,
ALU_OP_ADD_C = 0x11,
@@ -209,6 +220,7 @@ enum alu_dst_ab {
#define OP_CMD_CNT 0x0000e000000ULL
#define OP_CMD_SIG 0x000f0000000ULL
#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_INDIR 0x20000000000ULL
#define OP_CMD_MODE 0x1c0000000000ULL
struct cmd_tgt_act {
@@ -219,6 +231,7 @@ struct cmd_tgt_act {
enum cmd_tgt_map {
CMD_TGT_READ8,
CMD_TGT_WRITE8_SWAP,
+ CMD_TGT_WRITE32_SWAP,
CMD_TGT_READ32,
CMD_TGT_READ32_LE,
CMD_TGT_READ32_SWAP,
@@ -240,6 +253,9 @@ enum cmd_ctx_swap {
CMD_CTX_NO_SWAP = 3,
};
+#define CMD_OVE_LEN BIT(7)
+#define CMD_OV_LEN GENMASK(12, 8)
+
#define OP_LCSR_BASE 0x0fc00000000ULL
#define OP_LCSR_A_SRC 0x000000003ffULL
#define OP_LCSR_B_SRC 0x000000ffc00ULL
@@ -257,6 +273,7 @@ enum lcsr_wr_src {
#define OP_CARB_BASE 0x0e000000000ULL
#define OP_CARB_OR 0x00000010000ULL
+#define NFP_CSR_CTX_PTR 0x20
#define NFP_CSR_ACT_LM_ADDR0 0x64
#define NFP_CSR_ACT_LM_ADDR1 0x6c
#define NFP_CSR_ACT_LM_ADDR2 0x94
@@ -377,4 +394,13 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
int nfp_ustore_check_valid_no_ecc(u64 insn);
u64 nfp_ustore_calc_ecc_insn(u64 insn);
+#define NFP_IND_ME_REFL_WR_SIG_INIT 3
+#define NFP_IND_ME_CTX_PTR_BASE_MASK GENMASK(9, 0)
+#define NFP_IND_NUM_CONTEXTS 8
+
+static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
+{
+ return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
+}
+
#endif
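nfp_get_ind_csr_ctx_ptr_offs() above is pure mask arithmetic: keep the bits above the 10-bit ME-local CSR base and substitute the CTX_PTR CSR (0x20) as the low part. A one-function userspace check, with the constants copied from the header:

#include <stdint.h>
#include <stdio.h>

#define CTX_PTR_BASE_MASK 0x3ffu	/* GENMASK(9, 0) */
#define CSR_CTX_PTR       0x20u	/* NFP_CSR_CTX_PTR */

static uint32_t ind_csr_ctx_ptr_offs(uint32_t read_offset)
{
	return (read_offset & ~CTX_PTR_BASE_MASK) | CSR_CTX_PTR;
}

int main(void)
{
	/* 0x12464 keeps its upper bits, low 10 bits become 0x20 */
	printf("0x12464 -> 0x%x\n", ind_csr_ctx_ptr_offs(0x12464));
	return 0;
}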
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 6c9f29c2e975..eb0fc614673d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -152,18 +152,8 @@ out:
static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct nfp_pf *pf = devlink_priv(devlink);
- int ret;
-
- mutex_lock(&pf->lock);
- if (!pf->app) {
- ret = -EBUSY;
- goto out;
- }
- ret = nfp_app_eswitch_mode_get(pf->app, mode);
-out:
- mutex_unlock(&pf->lock);
- return ret;
+ return nfp_app_eswitch_mode_get(pf->app, mode);
}
const struct devlink_ops nfp_devlink_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 35eaccbece36..cc570bb6563c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -45,6 +45,7 @@
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vermagic.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
#include "nfpcore/nfp.h"
@@ -498,17 +499,16 @@ static int nfp_pci_probe(struct pci_dev *pdev,
if (err)
goto err_hwinfo_free;
- err = devlink_register(devlink, &pdev->dev);
- if (err)
- goto err_hwinfo_free;
-
err = nfp_nsp_init(pdev, pf);
if (err)
- goto err_devlink_unreg;
+ goto err_hwinfo_free;
pf->mip = nfp_mip_open(pf->cpp);
pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
+ pf->dump_flag = NFP_DUMP_NSP_DIAG;
+ pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);
+
err = nfp_pcie_sriov_read_nfd_limit(pf);
if (err)
goto err_fw_unload;
@@ -518,6 +518,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev,
"Error: %d VFs already enabled, but loaded FW can only support %d\n",
pf->num_vfs, pf->limit_vfs);
+ err = -EINVAL;
goto err_fw_unload;
}
@@ -544,8 +545,7 @@ err_fw_unload:
nfp_fw_unload(pf);
kfree(pf->eth_tbl);
kfree(pf->nspi);
-err_devlink_unreg:
- devlink_unregister(devlink);
+ vfree(pf->dumpspec);
err_hwinfo_free:
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
@@ -566,19 +566,15 @@ err_pci_disable:
static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
- struct devlink *devlink;
nfp_hwmon_unregister(pf);
- devlink = priv_to_devlink(pf);
-
- nfp_net_pci_remove(pf);
-
nfp_pcie_sriov_disable(pdev);
pci_sriov_set_totalvfs(pf->pdev, 0);
- devlink_unregister(devlink);
+ nfp_net_pci_remove(pf);
+ vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
if (pf->fw_loaded)
@@ -592,7 +588,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
kfree(pf->eth_tbl);
kfree(pf->nspi);
mutex_destroy(&pf->lock);
- devlink_free(devlink);
+ devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
}
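A minimal sketch of the goto-unwind convention the probe path above is reshuffling: resources are acquired top to bottom and released bottom to top, so moving or dropping an acquisition (as done with devlink_register()) means retargeting the error gotos rather than duplicating cleanup. All resources here are illustrative stand-ins.

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	void *hwinfo, *nsp;
	int err;

	hwinfo = malloc(16);		/* stand-in: HWInfo table */
	if (!hwinfo)
		return -1;

	nsp = malloc(16);		/* stand-in: NSP handle */
	if (!nsp) {
		err = -1;
		goto err_hwinfo_free;	/* unwind everything above */
	}

	printf("probed\n");
	free(nsp);
	free(hwinfo);
	return 0;

err_hwinfo_free:
	free(hwinfo);
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}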
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index be0ee59f2eb9..add46e28212b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -39,6 +39,7 @@
#ifndef NFP_MAIN_H
#define NFP_MAIN_H
+#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
@@ -62,6 +63,17 @@ struct nfp_port;
struct nfp_rtsym_table;
/**
+ * struct nfp_dumpspec - NFP FW dump specification structure
+ * @size: Size of the data
+ * @data: Sequence of TLVs, each being an instruction to dump some data
+ * from FW
+ */
+struct nfp_dumpspec {
+ u32 size;
+ u8 data[0];
+};
+
+/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
* @cpp: Pointer to the CPP handle
@@ -83,6 +95,9 @@ struct nfp_rtsym_table;
* @mip: MIP handle
* @rtbl: RTsym table
* @hwinfo: HWInfo table
+ * @dumpspec: Debug dump specification
+ * @dump_flag: Store dump flag between set_dump and get_dump_flag
+ * @dump_len: Store dump length between set_dump and get_dump_flag
* @eth_tbl: NSP ETH table
* @nspi: NSP identification info
* @hwmon_dev: pointer to hwmon device
@@ -124,6 +139,9 @@ struct nfp_pf {
const struct nfp_mip *mip;
struct nfp_rtsym_table *rtbl;
struct nfp_hwinfo *hwinfo;
+ struct nfp_dumpspec *dumpspec;
+ u32 dump_flag;
+ u32 dump_len;
struct nfp_eth_table *eth_tbl;
struct nfp_nsp_identify *nspi;
@@ -157,4 +175,15 @@ void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+enum nfp_dump_diag {
+ NFP_DUMP_NSP_DIAG = 0,
+};
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl);
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ u32 flag);
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ struct ethtool_dump *dump_param, void *dest);
+
#endif /* NFP_MAIN_H */
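struct nfp_dumpspec above is a fixed header followed by a zero-length (flexible) array, sized and allocated in one shot once the rtsym size is known. A userspace sketch of that layout and allocation, using malloc() where nfp_net_dump_load_dumpspec() uses vmalloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dumpspec {
	uint32_t size;
	uint8_t data[];		/* TLV bytes follow the header */
};

static struct dumpspec *dumpspec_alloc(const uint8_t *tlvs, uint32_t len)
{
	/* one allocation covers the header plus the variable payload */
	struct dumpspec *spec = malloc(sizeof(*spec) + len);

	if (!spec)
		return NULL;
	spec->size = len;
	memcpy(spec->data, tlvs, len);
	return spec;
}

int main(void)
{
	uint8_t raw[8] = { 0x01, 0x02 };
	struct dumpspec *spec = dumpspec_alloc(raw, sizeof(raw));

	if (spec)
		printf("spec holds %u bytes of TLVs\n", spec->size);
	free(spec);
	return 0;
}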
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 7f9857c276b1..d88eda9707e6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -47,6 +47,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -350,6 +351,7 @@ struct nfp_net_rx_buf {
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
+ * @xdp_rxq: RX-ring info avail for XDP
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -361,13 +363,14 @@ struct nfp_net_rx_ring {
u32 idx;
int fl_qcidx;
+ unsigned int size;
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
dma_addr_t dma;
- unsigned int size;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned;
/**
@@ -548,6 +551,8 @@ struct nfp_net_dp {
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
+ * @stride_rx: Queue controller RX queue spacing
+ * @stride_tx: Queue controller TX queue spacing
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
* @lsc_handler: Handler for Link State Change interrupt
@@ -573,6 +578,7 @@ struct nfp_net_dp {
* @qcp_cfg: Pointer to QCP queue used for configuration notification
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
+ * @tlv_caps: Parsed TLV capabilities
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -639,6 +645,8 @@ struct nfp_net {
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
+ struct nfp_net_tlv_caps tlv_caps;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -834,6 +842,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn)
return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
+static inline void nfp_ctrl_lock(struct nfp_net *nn)
+ __acquires(&nn->r_vecs[0].lock)
+{
+ spin_lock_bh(&nn->r_vecs[0].lock);
+}
+
+static inline void nfp_ctrl_unlock(struct nfp_net *nn)
+ __releases(&nn->r_vecs[0].lock)
+{
+ spin_unlock_bh(&nn->r_vecs[0].lock);
+}
+
/* Globals */
extern const char nfp_driver_version[];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 99b0487b6d82..c0fd351c86b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -293,9 +293,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*/
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
+ u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+ if (!nfp_net_has_mbox(&nn->tlv_caps)) {
+ nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
+ return -EIO;
+ }
+
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) {
@@ -303,7 +309,7 @@ static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return ret;
}
- return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
/* Interrupt configuration and handling
@@ -1610,11 +1616,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
+ struct xdp_buff xdp;
int idx;
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp.rxq = &rx_ring->xdp_rxq;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
@@ -1705,7 +1713,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
dp->bpf_offload_xdp) && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
- struct xdp_buff xdp;
int act;
xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
@@ -1919,6 +1926,13 @@ err_free:
return false;
}
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+ return nfp_ctrl_tx_one(nn, r_vec, skb, false);
+}
+
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
@@ -2254,6 +2268,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ if (dp->netdev)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
@@ -2277,7 +2293,14 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- int sz;
+ int sz, err;
+
+ if (dp->netdev) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+ rx_ring->idx);
+ if (err < 0)
+ return err;
+ }
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -2441,7 +2464,7 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
*/
- factor = nn->me_freq_mhz / 16;
+ factor = nn->tlv_caps.me_freq_mhz / 16;
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
@@ -2852,6 +2875,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl = nn->dp.ctrl;
+ if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -3036,6 +3064,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_dp *dp;
+ int err;
+
+ err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+ if (err)
+ return err;
dp = nfp_net_clone_dp(nn);
if (!dp)
@@ -3057,8 +3090,9 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
}
@@ -3074,8 +3108,9 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
@@ -3368,7 +3403,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
if (err)
return err;
- err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
+ err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
if (err && flags & XDP_FLAGS_HW_MODE)
return err;
@@ -3394,17 +3429,10 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
if (nn->dp.bpf_offload_xdp)
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
+ xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
return 0;
- case BPF_OFFLOAD_VERIFIER_PREP:
- return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
- case BPF_OFFLOAD_TRANSLATE:
- return nfp_app_bpf_translate(nn->app, nn,
- xdp->offload.prog);
- case BPF_OFFLOAD_DESTROY:
- return nfp_app_bpf_destroy(nn->app, nn,
- xdp->offload.prog);
default:
- return -EINVAL;
+ return nfp_app_bpf(nn->app, nn, xdp);
}
}
@@ -3563,9 +3591,6 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
*/
void nfp_net_free(struct nfp_net *nn)
{
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
-
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3731,18 +3756,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
nfp_net_set_ethtool_ops(netdev);
}
-/**
- * nfp_net_init() - Initialise/finalise the nfp_net structure
- * @nn: NFP Net device structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_init(struct nfp_net *nn)
+static int nfp_net_read_caps(struct nfp_net *nn)
{
- int err;
-
- nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3775,6 +3790,29 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
+ /* For control vNICs mask out the capabilities app doesn't want. */
+ if (!nn->dp.netdev)
+ nn->cap &= nn->app->type->ctrl_cap_mask;
+
+ return 0;
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+ int err;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+ err = nfp_net_read_caps(nn);
+ if (err)
+ return err;
+
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
nn->dp.mtu = nn->max_mtu;
@@ -3791,8 +3829,6 @@ int nfp_net_init(struct nfp_net *nn)
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
- if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* Allow IRQ moderation, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
@@ -3800,6 +3836,11 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ return err;
+
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
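nfp_net_reconfig_mbox() now addresses the mailbox relative to the TLV-provided base instead of a fixed offset: the command goes to base + NFP_NET_CFG_MBOX_SIMPLE_CMD, and the positive return code read back from base + NFP_NET_CFG_MBOX_SIMPLE_RET is negated into an errno-style result. A sketch of that protocol with a flat array standing in for the control BAR; the firmware reconfig handshake is elided.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MBOX_SIMPLE_CMD 0x0
#define MBOX_SIMPLE_RET 0x4

static uint8_t bar[0x2000];	/* fake control BAR */

static uint32_t bar_readl(uint32_t off)
{
	uint32_t v;

	memcpy(&v, bar + off, sizeof(v));
	return v;
}

static void bar_writel(uint32_t off, uint32_t v)
{
	memcpy(bar + off, &v, sizeof(v));
}

static int mbox_cmd(uint32_t mbox_off, uint32_t cmd)
{
	bar_writel(mbox_off + MBOX_SIMPLE_CMD, cmd);
	/* ... firmware reconfig handshake would run here ... */
	return -(int)bar_readl(mbox_off + MBOX_SIMPLE_RET);
}

int main(void)
{
	uint32_t mbox_off = 0x1800;	/* default when no MBOX TLV */

	bar_writel(mbox_off + MBOX_SIMPLE_RET, 0);	/* FW reports success */
	printf("cmd result: %d\n", mbox_cmd(mbox_off, 1));
	return 0;
}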
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
new file mode 100644
index 000000000000..ffb402746ad4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
+{
+ memset(caps, 0, sizeof(*caps));
+ caps->me_freq_mhz = 1200;
+ caps->mbox_off = NFP_NET_CFG_MBOX_BASE;
+ caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
+}
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps)
+{
+ u8 __iomem *data = ctrl_mem + NFP_NET_CFG_TLV_BASE;
+ u8 __iomem *end = ctrl_mem + NFP_NET_CFG_BAR_SZ;
+ u32 hdr;
+
+ nfp_net_tlv_caps_reset(caps);
+
+ hdr = readl(data);
+ if (!hdr)
+ return 0;
+
+ while (true) {
+ unsigned int length, offset;
+ u32 hdr = readl(data);
+
+ length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
+ offset = data - ctrl_mem;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (length % NFP_NET_CFG_TLV_LENGTH_INC) {
+ dev_err(dev, "TLV size not multiple of %u len:%u\n",
+ NFP_NET_CFG_TLV_LENGTH_INC, length);
+ return -EINVAL;
+ }
+ if (data + length > end) {
+ dev_err(dev, "oversized TLV offset:%u len:%u\n",
+ offset, length);
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr)) {
+ case NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ dev_err(dev, "NULL TLV at offset:%u\n", offset);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_RESERVED:
+ break;
+ case NFP_NET_CFG_TLV_TYPE_END:
+ if (!length)
+ return 0;
+
+ dev_err(dev, "END TLV should be empty, has len:%d\n",
+ length);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ if (length != 4) {
+ dev_err(dev,
+ "ME FREQ TLV should be 4B, is %dB\n",
+ length);
+ return -EINVAL;
+ }
+
+ caps->me_freq_mhz = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX:
+ if (!length) {
+ caps->mbox_off = 0;
+ caps->mbox_len = 0;
+ } else {
+ caps->mbox_off = data - ctrl_mem;
+ caps->mbox_len = length;
+ }
+ break;
+ default:
+ if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
+ break;
+
+ dev_err(dev, "unknown TLV type:%u offset:%u len:%u\n",
+ FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
+ offset, length);
+ return -EINVAL;
+ }
+
+ data += length;
+ if (data + 4 > end) {
+ dev_err(dev, "reached end of BAR without END TLV\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Not reached */
+ return -EINVAL;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 782d452e0fc2..eeecef2caac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -43,9 +43,7 @@
#ifndef _NFP_NET_CTRL_H_
#define _NFP_NET_CTRL_H_
-/* IMPORTANT: This header file is shared with the FW,
- * no OS specific constructs, please!
- */
+#include <linux/types.h>
/**
* Configuration BAR size.
@@ -91,23 +89,24 @@
#define NFP_NET_RSS_IPV6_EX_UDP 9
/**
- * @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_RXR_MAX: Maximum number of RX rings
+ * Ring counts
+ * %NFP_NET_TXR_MAX: Maximum number of TX rings
+ * %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
#define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64
/**
* Read/Write config words (0x0000 - 0x002c)
- * @NFP_NET_CFG_CTRL: Global control
- * @NFP_NET_CFG_UPDATE: Indicate which fields are updated
- * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
- * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
- * @NFP_NET_CFG_MTU: Set MTU size
- * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
- * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions
- * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes
- * @NFP_NET_CFG_MACADDR: MAC address
+ * %NFP_NET_CFG_CTRL: Global control
+ * %NFP_NET_CFG_UPDATE: Indicate which fields are updated
+ * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
+ * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
+ * %NFP_NET_CFG_MTU: Set MTU size
+ * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
+ * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
+ * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
+ * %NFP_NET_CFG_MACADDR: MAC address
*
* TODO:
* - define Error details in UPDATE
@@ -176,14 +175,14 @@
/**
* Read-only words (0x0030 - 0x0050):
- * @NFP_NET_CFG_VERSION: Firmware version number
- * @NFP_NET_CFG_STS: Status
- * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_CFG_MAX_MTU: Maximum support MTU
- * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
- * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
+ * %NFP_NET_CFG_VERSION: Firmware version number
+ * %NFP_NET_CFG_STS: Status
+ * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
+ * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * %NFP_NET_CFG_MAX_MTU: Maximum supported MTU
+ * %NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
+ * %NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
*
* TODO:
* - define more STS bits
@@ -228,31 +227,37 @@
/**
* RSS capabilities
- * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
- * @NFP_NET_CFG_RSS_HFUNC)
+ * %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ * %NFP_NET_CFG_RSS_HFUNC)
*/
#define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
/**
+ * TLV area start
+ * %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area
+ */
+#define NFP_NET_CFG_TLV_BASE 0x0058
+
+/**
* VXLAN/UDP encap configuration
- * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
- * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
+ * %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
+ * %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
#define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
* BPF section
- * @NFP_NET_CFG_BPF_ABI: BPF ABI version
- * @NFP_NET_CFG_BPF_CAP: BPF capabilities
- * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
- * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
- * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
- * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
- * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
- * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
- * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
+ * %NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * %NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * %NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * %NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * %NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * %NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * %NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
#define NFP_NET_BPF_ABI 2
@@ -278,9 +283,9 @@
/**
* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
- * @NFP_NET_CFG_RSS_CFG: RSS configuration word
- * @NFP_NET_CFG_RSS_KEY: RSS "secret" key
- * @NFP_NET_CFG_RSS_ITBL: RSS indirection table
+ * %NFP_NET_CFG_RSS_CFG: RSS configuration word
+ * %NFP_NET_CFG_RSS_KEY: RSS "secret" key
+ * %NFP_NET_CFG_RSS_ITBL: RSS indirection table
*/
#define NFP_NET_CFG_RSS_BASE 0x0100
#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
@@ -305,13 +310,13 @@
/**
* TX ring configuration (0x200 - 0x800)
- * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
- * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
- * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
- * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
- * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
- * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
+ * %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
+ * %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
+ * %NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
+ * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
*/
#define NFP_NET_CFG_TXR_BASE 0x0200
#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
@@ -325,12 +330,12 @@
/**
* RX ring configuration (0x0800 - 0x0c00)
- * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
- * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
- * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
- * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
- * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
+ * %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
+ * %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
+ * %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
+ * %NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
+ * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
*/
#define NFP_NET_CFG_RXR_BASE 0x0800
#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
@@ -343,7 +348,7 @@
/**
* Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not
- * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
+ * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is indexed
* by MSI-X entry; entries are 1B in size. If an entry is zero, the
* corresponding entry is enabled. If the FW generates an interrupt,
* it writes a cause into the corresponding field. This also masks
@@ -393,8 +398,8 @@
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
- * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
- * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
*/
#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
@@ -408,24 +413,105 @@
* 4B used for update command and 4B return code
* followed by a max of 504B of variable length value
*/
-#define NFP_NET_CFG_MBOX_CMD 0x1800
-#define NFP_NET_CFG_MBOX_RET 0x1804
-#define NFP_NET_CFG_MBOX_VAL 0x1808
+#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
+#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
+#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
+#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
+#define NFP_NET_CFG_MBOX_SIMPLE_LEN 0x12
+
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
/**
* VLAN filtering using general use mailbox
- * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
- * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
- * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
- * @NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes
+ * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
+ * %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
+ * %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
+ * %NFP_NET_CFG_VLAN_FILTER_SZ: Size of the VLAN filter mailbox in bytes
*/
-#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+/**
+ * TLV capabilities
+ * %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
+ * %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
+ * %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
+ * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
+ * %NFP_NET_CFG_TLV_VALUE: Offset of value within the TLV
+ *
+ * List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE.
+ * Last structure must be of type %NFP_NET_CFG_TLV_TYPE_END. Presence of TLVs
+ * is indicated by %NFP_NET_CFG_TLV_BASE being non-zero. TLV structures may
+ * fill the entire remainder of the BAR or be shorter. FW must make sure TLVs
+ * don't conflict with other features which allocate space beyond
+ * %NFP_NET_CFG_TLV_BASE. %NFP_NET_CFG_TLV_TYPE_RESERVED should be used to wrap
+ * space used by such features.
+ * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH.
+ */
+#define NFP_NET_CFG_TLV_TYPE 0x00
+#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
+#define NFP_NET_CFG_TLV_LENGTH 0x02
+#define NFP_NET_CFG_TLV_LENGTH_INC 4
+#define NFP_NET_CFG_TLV_VALUE 0x04
+
+#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
+#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
+#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
+
+/**
+ * Capability TLV types
+ *
+ * %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ * Special TLV type to catch bugs, should never be encountered. Drivers should
+ * treat encountering this type as an error and refuse to probe.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_RESERVED:
+ * Reserved space, may contain legacy fixed-offset fields, or be used for
+ * padding. The use of this type should be otherwise avoided.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_END:
+ * Empty, end of TLV list. Must be the last TLV. Drivers will stop processing
+ * further TLVs when encountered.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ * Single word, ME frequency in MHz as used in calculation for
+ * %NFP_NET_CFG_RXR_IRQ_MOD and %NFP_NET_CFG_TXR_IRQ_MOD.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX:
+ * Variable, mailbox area. Overwrites the default location which is
+ * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ.
+ */
+#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
+#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
+#define NFP_NET_CFG_TLV_TYPE_END 2
+#define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3
+#define NFP_NET_CFG_TLV_TYPE_MBOX 4
+
+struct device;
+
+/**
+ * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+ * @me_freq_mhz: ME clock_freq (MHz)
+ * @mbox_off: vNIC mailbox area offset
+ * @mbox_len: vNIC mailbox area length
+ */
+struct nfp_net_tlv_caps {
+ u32 me_freq_mhz;
+ unsigned int mbox_off;
+ unsigned int mbox_len;
+};
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps);
+
+static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
+{
+ return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
+}
+
#endif /* _NFP_NET_CTRL_H_ */
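Putting the TLV definitions above together: a 32-bit header packs the required bit, type, and length (which excludes the 4-byte header and must be a multiple of NFP_NET_CFG_TLV_LENGTH_INC), and an empty END TLV terminates the list. A runnable sketch of the walk over hand-built test data; the real parser reads the BAR with readl() and handles more types.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_REQUIRED 0x80000000u
#define HDR_TYPE     0x7fff0000u
#define HDR_LENGTH   0x0000ffffu

#define TYPE_END     2
#define TYPE_ME_FREQ 3

static int parse_tlvs(const uint8_t *data, size_t size)
{
	size_t off = 0;

	while (off + 4 <= size) {
		uint32_t hdr;

		memcpy(&hdr, data + off, 4);

		uint32_t type = (hdr & HDR_TYPE) >> 16;
		uint32_t len  = hdr & HDR_LENGTH;

		if (len % 4 || off + 4 + len > size)
			return -1;		/* malformed TLV */
		if (type == TYPE_END && !len)
			return 0;		/* clean end of list */
		if (type == TYPE_ME_FREQ && len == 4) {
			uint32_t mhz;

			memcpy(&mhz, data + off + 4, 4);
			printf("ME freq: %u MHz\n", mhz);
		} else if (hdr & HDR_REQUIRED) {
			return -1;		/* unknown but mandatory */
		}
		off += 4 + len;			/* skip header plus value */
	}
	return -1;				/* ran off the end, no END TLV */
}

int main(void)
{
	uint8_t buf[12];
	uint32_t me = (TYPE_ME_FREQ << 16) | 4, freq = 1200, end = TYPE_END << 16;

	memcpy(buf, &me, 4);
	memcpy(buf + 4, &freq, 4);
	memcpy(buf + 8, &end, 4);
	printf("parse: %d\n", parse_tlvs(buf, sizeof(buf)));
	return 0;
}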
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
new file mode 100644
index 000000000000..bb8ed460086e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#include "nfp_asm.h"
+#include "nfp_main.h"
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp6000/nfp6000.h"
+
+#define NFP_DUMP_SPEC_RTSYM "_abi_dump_spec"
+
+#define ALIGN8(x) ALIGN(x, 8)
+
+enum nfp_dumpspec_type {
+ NFP_DUMPSPEC_TYPE_CPP_CSR = 0,
+ NFP_DUMPSPEC_TYPE_XPB_CSR = 1,
+ NFP_DUMPSPEC_TYPE_ME_CSR = 2,
+ NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR = 3,
+ NFP_DUMPSPEC_TYPE_RTSYM = 4,
+ NFP_DUMPSPEC_TYPE_HWINFO = 5,
+ NFP_DUMPSPEC_TYPE_FWNAME = 6,
+ NFP_DUMPSPEC_TYPE_HWINFO_FIELD = 7,
+ NFP_DUMPSPEC_TYPE_PROLOG = 10000,
+ NFP_DUMPSPEC_TYPE_ERROR = 10001,
+};
+
+/* The following structs must be carefully aligned so that they can be used to
+ * interpret the binary dumpspec and populate the dump data in a deterministic
+ * way.
+ */
+
+/* generic type plus length */
+struct nfp_dump_tl {
+ __be32 type;
+ __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ char data[0];
+};
+
+/* NFP CPP parameters */
+struct nfp_dumpspec_cpp_isl_id {
+ u8 target;
+ u8 action;
+ u8 token;
+ u8 island;
+};
+
+struct nfp_dump_common_cpp {
+ struct nfp_dumpspec_cpp_isl_id cpp_id;
+ __be32 offset; /* address to start dump */
+ __be32 dump_length; /* total bytes to dump, aligned to reg size */
+};
+
+/* CSR dumpables */
+struct nfp_dumpspec_csr {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 register_width; /* in bits */
+};
+
+struct nfp_dumpspec_rtsym {
+ struct nfp_dump_tl tl;
+ char rtsym[0];
+};
+
+/* header for register dumpable */
+struct nfp_dump_csr {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 register_width; /* in bits */
+ __be32 error; /* error code encountered while reading */
+ __be32 error_offset; /* offset being read when error occurred */
+};
+
+struct nfp_dump_rtsym {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 error; /* error code encountered while reading */
+ u8 padded_name_length; /* pad so data starts at 8 byte boundary */
+ char rtsym[0];
+ /* after padded_name_length, there is dump_length data */
+};
+
+struct nfp_dump_prolog {
+ struct nfp_dump_tl tl;
+ __be32 dump_level;
+};
+
+struct nfp_dump_error {
+ struct nfp_dump_tl tl;
+ __be32 error;
+ char padding[4];
+ char spec[0];
+};
+
+/* to track state through debug size calculation TLV traversal */
+struct nfp_level_size {
+ __be32 requested_level; /* input */
+ u32 total_size; /* output */
+};
+
+/* to track state during debug dump creation TLV traversal */
+struct nfp_dump_state {
+ __be32 requested_level; /* input param */
+ u32 dumped_size; /* adds up to size of dumped data */
+ u32 buf_size; /* size of buffer pointed to by p */
+ void *p; /* current position in dump buffer */
+};
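
As a sketch of the alignment constraints described in the comment above,
hypothetical compile-time checks along these lines would pin down the wire
format the parser relies on (sizes derived from the struct definitions;
illustrative, not part of the patch):

    static inline void nfp_dump_layout_checks(void)
    {
    	/* TL header is 8 bytes; common CPP block is 12 bytes */
    	BUILD_BUG_ON(sizeof(struct nfp_dump_tl) != 8);
    	BUILD_BUG_ON(sizeof(struct nfp_dump_common_cpp) != 12);
    	/* error chunks carry the offending spec on an 8-byte boundary */
    	BUILD_BUG_ON(offsetof(struct nfp_dump_error, spec) % 8);
    }
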
+
+typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
+ void *param);
+
+static int
+nfp_traverse_tlvs(struct nfp_pf *pf, void *data, u32 data_length, void *param,
+ nfp_tlv_visit tlv_visit)
+{
+ long long remaining = data_length;
+ struct nfp_dump_tl *tl;
+ u32 total_tlv_size;
+ void *p = data;
+ int err;
+
+ while (remaining >= sizeof(*tl)) {
+ tl = p;
+ if (!tl->type && !tl->length)
+ break;
+
+ if (be32_to_cpu(tl->length) > remaining - sizeof(*tl))
+ return -EINVAL;
+
+ total_tlv_size = sizeof(*tl) + be32_to_cpu(tl->length);
+
+ /* Spec TLVs should be aligned to 4 bytes. */
+ if (total_tlv_size % 4 != 0)
+ return -EINVAL;
+
+ p += total_tlv_size;
+ remaining -= total_tlv_size;
+ err = tlv_visit(pf, tl, param);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
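
The traversal is a visitor pattern: nfp_traverse_tlvs() walks the TLV stream
and hands each chunk, still in place, to the nfp_tlv_visit callback. A
minimal hypothetical visitor that counts chunks of one type would plug in
like this (illustrative only):

    struct tlv_type_count {
    	u32 wanted_type;	/* host byte order */
    	u32 count;
    };

    static int count_tlv_type(struct nfp_pf *pf, struct nfp_dump_tl *tl,
    			      void *param)
    {
    	struct tlv_type_count *tc = param;

    	if (be32_to_cpu(tl->type) == tc->wanted_type)
    		tc->count++;
    	return 0;	/* a non-zero return aborts the traversal */
    }

It would run over one level's contents via nfp_traverse_tlvs(pf, level->data,
be32_to_cpu(level->length), &tc, count_tlv_type), just as the size and dump
visitors below do.
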
+
+static u32 nfp_get_numeric_cpp_id(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+ return NFP_CPP_ISLAND_ID(cpp_id->target, cpp_id->action, cpp_id->token,
+ cpp_id->island);
+}
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl)
+{
+ const struct nfp_rtsym *specsym;
+ struct nfp_dumpspec *dumpspec;
+ int bytes_read;
+ u32 cpp_id;
+
+ specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM);
+ if (!specsym)
+ return NULL;
+
+ /* expected size of this buffer is on the order of tens of kilobytes */
+ dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size);
+ if (!dumpspec)
+ return NULL;
+
+ dumpspec->size = specsym->size;
+
+ cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0,
+ specsym->domain);
+
+ bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data,
+ specsym->size);
+ if (bytes_read != specsym->size) {
+ vfree(dumpspec);
+ nfp_warn(cpp, "Debug dump specification read failed.\n");
+ return NULL;
+ }
+
+ return dumpspec;
+}
+
+static int nfp_dump_error_tlv_size(struct nfp_dump_tl *spec)
+{
+ return ALIGN8(sizeof(struct nfp_dump_error) + sizeof(*spec) +
+ be32_to_cpu(spec->length));
+}
+
+static int nfp_calc_fwname_tlv_size(struct nfp_pf *pf)
+{
+ u32 fwname_len = strlen(nfp_mip_name(pf->mip));
+
+ return sizeof(struct nfp_dump_tl) + ALIGN8(fwname_len + 1);
+}
+
+static int nfp_calc_hwinfo_field_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+ u32 tl_len, key_len;
+ const char *value;
+
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec->data, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv_size(spec);
+
+ value = nfp_hwinfo_lookup(pf->hwinfo, spec->data);
+ if (!value)
+ return nfp_dump_error_tlv_size(spec);
+
+ return sizeof(struct nfp_dump_tl) + ALIGN8(key_len + strlen(value) + 2);
+}
+
+static bool nfp_csr_spec_valid(struct nfp_dumpspec_csr *spec_csr)
+{
+ u32 required_read_sz = sizeof(*spec_csr) - sizeof(spec_csr->tl);
+ u32 available_sz = be32_to_cpu(spec_csr->tl.length);
+ u32 reg_width;
+
+ if (available_sz < required_read_sz)
+ return false;
+
+ reg_width = be32_to_cpu(spec_csr->register_width);
+
+ return reg_width == 32 || reg_width == 64;
+}
+
+static int
+nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+ struct nfp_rtsym_table *rtbl = pf->rtbl;
+ struct nfp_dumpspec_rtsym *spec_rtsym;
+ const struct nfp_rtsym *sym;
+ u32 tl_len, key_len;
+ u32 size;
+
+ spec_rtsym = (struct nfp_dumpspec_rtsym *)spec;
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec_rtsym->rtsym, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv_size(spec);
+
+ sym = nfp_rtsym_lookup(rtbl, spec_rtsym->rtsym);
+ if (!sym)
+ return nfp_dump_error_tlv_size(spec);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS)
+ size = sizeof(sym->addr);
+ else
+ size = sym->size;
+
+ return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) +
+ ALIGN8(size);
+}
+
+static int
+nfp_add_tlv_size(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+ struct nfp_dumpspec_csr *spec_csr;
+ u32 *size = param;
+ u32 hwinfo_size;
+
+ switch (be32_to_cpu(tl->type)) {
+ case NFP_DUMPSPEC_TYPE_FWNAME:
+ *size += nfp_calc_fwname_tlv_size(pf);
+ break;
+ case NFP_DUMPSPEC_TYPE_CPP_CSR:
+ case NFP_DUMPSPEC_TYPE_XPB_CSR:
+ case NFP_DUMPSPEC_TYPE_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ if (!nfp_csr_spec_valid(spec_csr))
+ *size += nfp_dump_error_tlv_size(tl);
+ else
+ *size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+ break;
+ case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ if (!nfp_csr_spec_valid(spec_csr))
+ *size += nfp_dump_error_tlv_size(tl);
+ else
+ *size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length) *
+ NFP_IND_NUM_CONTEXTS);
+ break;
+ case NFP_DUMPSPEC_TYPE_RTSYM:
+ *size += nfp_calc_rtsym_dump_sz(pf, tl);
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO:
+ hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+ *size += sizeof(struct nfp_dump_tl) + ALIGN8(hwinfo_size);
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+ *size += nfp_calc_hwinfo_field_sz(pf, tl);
+ break;
+ default:
+ *size += nfp_dump_error_tlv_size(tl);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_calc_specific_level_size(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+ void *param)
+{
+ struct nfp_level_size *lev_sz = param;
+
+ if (dump_level->type != lev_sz->requested_level)
+ return 0;
+
+ return nfp_traverse_tlvs(pf, dump_level->data,
+ be32_to_cpu(dump_level->length),
+ &lev_sz->total_size, nfp_add_tlv_size);
+}
+
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ u32 flag)
+{
+ struct nfp_level_size lev_sz;
+ int err;
+
+ lev_sz.requested_level = cpu_to_be32(flag);
+ lev_sz.total_size = ALIGN8(sizeof(struct nfp_dump_prolog));
+
+ err = nfp_traverse_tlvs(pf, spec->data, spec->size, &lev_sz,
+ nfp_calc_specific_level_size);
+ if (err)
+ return err;
+
+ return lev_sz.total_size;
+}
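
Sizing and population are deliberately two separate passes over the same
spec. A hypothetical caller pairs them the same way the ethtool path further
down does (error handling abbreviated):

    struct ethtool_dump d = { .flag = flag };
    void *buf;
    s64 len;

    len = nfp_net_dump_calculate_size(pf, pf->dumpspec, flag);
    if (len < 0)
    	return len;

    d.len = len;
    buf = vzalloc(len);
    if (!buf)
    	return -ENOMEM;
    err = nfp_net_dump_populate_buffer(pf, pf->dumpspec, &d, buf);
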
+
+static int nfp_add_tlv(u32 type, u32 total_tlv_sz, struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *tl = dump->p;
+
+ if (total_tlv_sz > dump->buf_size)
+ return -ENOSPC;
+
+ if (dump->buf_size - total_tlv_sz < dump->dumped_size)
+ return -ENOSPC;
+
+ tl->type = cpu_to_be32(type);
+ tl->length = cpu_to_be32(total_tlv_sz - sizeof(*tl));
+
+ dump->dumped_size += total_tlv_sz;
+ dump->p += total_tlv_sz;
+
+ return 0;
+}
+
+static int
+nfp_dump_error_tlv(struct nfp_dump_tl *spec, int error,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_error *dump_header = dump->p;
+ u32 total_spec_size, total_size;
+ int err;
+
+ total_spec_size = sizeof(*spec) + be32_to_cpu(spec->length);
+ total_size = ALIGN8(sizeof(*dump_header) + total_spec_size);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_ERROR, total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->error = cpu_to_be32(error);
+ memcpy(dump_header->spec, spec, total_spec_size);
+
+ return 0;
+}
+
+static int nfp_dump_fwname(struct nfp_pf *pf, struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 fwname_len, total_size;
+ const char *fwname;
+ int err;
+
+ fwname = nfp_mip_name(pf->mip);
+ fwname_len = strlen(fwname);
+ total_size = sizeof(*dump_header) + ALIGN8(fwname_len + 1);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_FWNAME, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, fwname, fwname_len);
+
+ return 0;
+}
+
+static int
+nfp_dump_hwinfo(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 hwinfo_size, total_size;
+ char *hwinfo;
+ int err;
+
+ hwinfo = nfp_hwinfo_get_packed_strings(pf->hwinfo);
+ hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+ total_size = sizeof(*dump_header) + ALIGN8(hwinfo_size);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, hwinfo, hwinfo_size);
+
+ return 0;
+}
+
+static int nfp_dump_hwinfo_field(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 tl_len, key_len, val_len;
+ const char *key, *value;
+ u32 total_size;
+ int err;
+
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec->data, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv(spec, -EINVAL, dump);
+
+ key = spec->data;
+ value = nfp_hwinfo_lookup(pf->hwinfo, key);
+ if (!value)
+ return nfp_dump_error_tlv(spec, -ENOENT, dump);
+
+ val_len = strlen(value);
+ total_size = sizeof(*dump_header) + ALIGN8(key_len + val_len + 2);
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO_FIELD, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, key, key_len + 1);
+ memcpy(dump_header->data + key_len + 1, value, val_len + 1);
+
+ return 0;
+}
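
The resulting chunk payload packs the key and value as consecutive
NUL-terminated strings (field name and value hypothetical):

    /* e.g. key "board.model", value "starfish" is laid out as
     *   "board.model\0starfish\0" plus zero padding to the next
     *   8-byte boundary, matching the ALIGN8() in the size calculation
     */
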
+
+static bool is_xpb_read(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+ return cpp_id->target == NFP_CPP_TARGET_ISLAND_XPB &&
+ cpp_id->action == 0 && cpp_id->token == 0;
+}
+
+static int
+nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_csr *dump_header = dump->p;
+ u32 reg_sz, header_size, total_size;
+ u32 cpp_rd_addr, max_rd_addr;
+ int bytes_read;
+ void *dest;
+ u32 cpp_id;
+ int err;
+
+ if (!nfp_csr_spec_valid(spec_csr))
+ return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+ reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+ header_size = ALIGN8(sizeof(*dump_header));
+ total_size = header_size +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->cpp = spec_csr->cpp;
+ dump_header->register_width = spec_csr->register_width;
+
+ cpp_id = nfp_get_numeric_cpp_id(&spec_csr->cpp.cpp_id);
+ cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+ max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+
+ while (cpp_rd_addr < max_rd_addr) {
+ if (is_xpb_read(&spec_csr->cpp.cpp_id)) {
+ err = nfp_xpb_readl(pf->cpp, cpp_rd_addr, (u32 *)dest);
+ } else {
+ bytes_read = nfp_cpp_read(pf->cpp, cpp_id, cpp_rd_addr,
+ dest, reg_sz);
+ err = bytes_read == reg_sz ? 0 : -EIO;
+ }
+ if (err) {
+ dump_header->error = cpu_to_be32(err);
+ dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+ break;
+ }
+ cpp_rd_addr += reg_sz;
+ dest += reg_sz;
+ }
+
+ return 0;
+}
+
+/* Write the context to CSRCtxPtr, then read it back. The value can then be
+ * read from IndCtxStatus.
+ */
+static int
+nfp_read_indirect_csr(struct nfp_cpp *cpp,
+ struct nfp_dumpspec_cpp_isl_id cpp_params, u32 offset,
+ u32 reg_sz, u32 context, void *dest)
+{
+ u32 csr_ctx_ptr_offs;
+ u32 cpp_id;
+ int result;
+
+ csr_ctx_ptr_offs = nfp_get_ind_csr_ctx_ptr_offs(offset);
+ cpp_id = NFP_CPP_ISLAND_ID(cpp_params.target,
+ NFP_IND_ME_REFL_WR_SIG_INIT,
+ cpp_params.token, cpp_params.island);
+ result = nfp_cpp_writel(cpp, cpp_id, csr_ctx_ptr_offs, context);
+ if (result)
+ return result;
+
+ cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+ result = nfp_cpp_read(cpp, cpp_id, csr_ctx_ptr_offs, dest, reg_sz);
+ if (result != reg_sz)
+ return result < 0 ? result : -EIO;
+
+ result = nfp_cpp_read(cpp, cpp_id, offset, dest, reg_sz);
+ if (result != reg_sz)
+ return result < 0 ? result : -EIO;
+
+ return 0;
+}
+
+static int
+nfp_read_all_indirect_csr_ctx(struct nfp_cpp *cpp,
+ struct nfp_dumpspec_csr *spec_csr, u32 address,
+ u32 reg_sz, void *dest)
+{
+ u32 ctx;
+ int err;
+
+ for (ctx = 0; ctx < NFP_IND_NUM_CONTEXTS; ctx++) {
+ err = nfp_read_indirect_csr(cpp, spec_csr->cpp.cpp_id, address,
+ reg_sz, ctx, dest + ctx * reg_sz);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_indirect_csr_range(struct nfp_pf *pf,
+ struct nfp_dumpspec_csr *spec_csr,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_csr *dump_header = dump->p;
+ u32 reg_sz, header_size, total_size;
+ u32 cpp_rd_addr, max_rd_addr;
+ u32 reg_data_length;
+ void *dest;
+ int err;
+
+ if (!nfp_csr_spec_valid(spec_csr))
+ return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+ reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+ header_size = ALIGN8(sizeof(*dump_header));
+ reg_data_length = be32_to_cpu(spec_csr->cpp.dump_length) *
+ NFP_IND_NUM_CONTEXTS;
+ total_size = header_size + ALIGN8(reg_data_length);
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->cpp = spec_csr->cpp;
+ dump_header->register_width = spec_csr->register_width;
+
+ cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+ max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+ while (cpp_rd_addr < max_rd_addr) {
+ err = nfp_read_all_indirect_csr_ctx(pf->cpp, spec_csr,
+ cpp_rd_addr, reg_sz, dest);
+ if (err) {
+ dump_header->error = cpu_to_be32(err);
+ dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+ break;
+ }
+ cpp_rd_addr += reg_sz;
+ dest += reg_sz * NFP_IND_NUM_CONTEXTS;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_rtsym *dump_header = dump->p;
+ struct nfp_dumpspec_cpp_isl_id cpp_params;
+ struct nfp_rtsym_table *rtbl = pf->rtbl;
+ u32 header_size, total_size, sym_size;
+ const struct nfp_rtsym *sym;
+ u32 tl_len, key_len;
+ int bytes_read;
+ u32 cpp_id;
+ void *dest;
+ int err;
+
+ tl_len = be32_to_cpu(spec->tl.length);
+ key_len = strnlen(spec->rtsym, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+
+ sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
+ if (!sym)
+ return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS)
+ sym_size = sizeof(sym->addr);
+ else
+ sym_size = sym->size;
+
+ header_size =
+ ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1);
+ total_size = header_size + ALIGN8(sym_size);
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->padded_name_length =
+ header_size - offsetof(struct nfp_dump_rtsym, rtsym);
+ memcpy(dump_header->rtsym, spec->rtsym, key_len + 1);
+ dump_header->cpp.dump_length = cpu_to_be32(sym_size);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS) {
+ *(u64 *)dest = sym->addr;
+ } else {
+ cpp_params.target = sym->target;
+ cpp_params.action = NFP_CPP_ACTION_RW;
+ cpp_params.token = 0;
+ cpp_params.island = sym->domain;
+ cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+ dump_header->cpp.cpp_id = cpp_params;
+ dump_header->cpp.offset = cpu_to_be32(sym->addr);
+ bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest,
+ sym_size);
+ if (bytes_read != sym_size) {
+ if (bytes_read >= 0)
+ bytes_read = -EIO;
+ dump_header->error = cpu_to_be32(bytes_read);
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_for_tlv(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+ struct nfp_dumpspec_rtsym *spec_rtsym;
+ struct nfp_dump_state *dump = param;
+ struct nfp_dumpspec_csr *spec_csr;
+ int err;
+
+ switch (be32_to_cpu(tl->type)) {
+ case NFP_DUMPSPEC_TYPE_FWNAME:
+ err = nfp_dump_fwname(pf, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_CPP_CSR:
+ case NFP_DUMPSPEC_TYPE_XPB_CSR:
+ case NFP_DUMPSPEC_TYPE_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ err = nfp_dump_csr_range(pf, spec_csr, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ err = nfp_dump_indirect_csr_range(pf, spec_csr, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_RTSYM:
+ spec_rtsym = (struct nfp_dumpspec_rtsym *)tl;
+ err = nfp_dump_single_rtsym(pf, spec_rtsym, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO:
+ err = nfp_dump_hwinfo(pf, tl, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+ err = nfp_dump_hwinfo_field(pf, tl, dump);
+ if (err)
+ return err;
+ break;
+ default:
+ err = nfp_dump_error_tlv(tl, -EOPNOTSUPP, dump);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_specific_level(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+ void *param)
+{
+ struct nfp_dump_state *dump = param;
+
+ if (dump_level->type != dump->requested_level)
+ return 0;
+
+ return nfp_traverse_tlvs(pf, dump_level->data,
+ be32_to_cpu(dump_level->length), dump,
+ nfp_dump_for_tlv);
+}
+
+static int nfp_dump_populate_prolog(struct nfp_dump_state *dump)
+{
+ struct nfp_dump_prolog *prolog = dump->p;
+ u32 total_size;
+ int err;
+
+ total_size = ALIGN8(sizeof(*prolog));
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_PROLOG, total_size, dump);
+ if (err)
+ return err;
+
+ prolog->dump_level = dump->requested_level;
+
+ return 0;
+}
+
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ struct ethtool_dump *dump_param, void *dest)
+{
+ struct nfp_dump_state dump;
+ int err;
+
+ dump.requested_level = cpu_to_be32(dump_param->flag);
+ dump.dumped_size = 0;
+ dump.p = dest;
+ dump.buf_size = dump_param->len;
+
+ err = nfp_dump_populate_prolog(&dump);
+ if (err)
+ return err;
+
+ err = nfp_traverse_tlvs(pf, spec->data, spec->size, &dump,
+ nfp_dump_specific_level);
+ if (err)
+ return err;
+
+ /* Set size of actual dump, to trigger warning if different from
+ * calculated size.
+ */
+ dump_param->len = dump.dumped_size;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6c02b2d6ba06..e1dae0616f52 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -47,18 +47,16 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
+#include <linux/firmware.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
+#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"
-enum nfp_dump_diag {
- NFP_DUMP_NSP_DIAG = 0,
-};
-
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
int off;
@@ -1066,15 +1064,34 @@ exit_release:
return ret;
}
+/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
+ * based dumps), since flag 0 (default) calculates the length in
+ * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
+ * without setting the flag first, for backward compatibility.
+ */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
+ s64 len;
if (!app)
return -EOPNOTSUPP;
- if (val->flag != NFP_DUMP_NSP_DIAG)
- return -EINVAL;
+ if (val->flag == NFP_DUMP_NSP_DIAG) {
+ app->pf->dump_flag = val->flag;
+ return 0;
+ }
+
+ if (!app->pf->dumpspec)
+ return -EOPNOTSUPP;
+
+ len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
+ val->flag);
+ if (len < 0)
+ return len;
+
+ app->pf->dump_flag = val->flag;
+ app->pf->dump_len = len;
return 0;
}
@@ -1082,14 +1099,37 @@ static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
- return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app)
+ return -EOPNOTSUPP;
+
+ if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+ return nfp_dump_nsp_diag(app, dump, NULL);
+
+ dump->flag = app->pf->dump_flag;
+ dump->len = app->pf->dump_len;
+
+ return 0;
}
static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
void *buffer)
{
- return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app)
+ return -EOPNOTSUPP;
+
+ if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+ return nfp_dump_nsp_diag(app, dump, buffer);
+
+ dump->flag = app->pf->dump_flag;
+ dump->len = app->pf->dump_len;
+
+ return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
+ buffer);
}
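
Taken together, set_dump, get_dump_flag and get_dump_data map onto the
standard ethtool dump commands. Assuming a hypothetical interface name, a
session exercising the new TLV-based levels might look like:

    # select dump level 1 (flag 0 remains the legacy NSP diagnostics)
    ethtool -W ens4np0 1
    # report the selected flag and the pre-calculated dump length
    ethtool -w ens4np0
    # populate the buffer and write it out to a file
    ethtool -w ens4np0 data /tmp/nfp_dump.dat
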
static int nfp_net_set_coalesce(struct net_device *netdev,
@@ -1230,6 +1270,57 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static int
+nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
+{
+ const struct firmware *fw;
+ struct nfp_app *app;
+ struct nfp_nsp *nsp;
+ struct device *dev;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ app = nfp_app_from_netdev(netdev);
+ if (!app)
+ return -EOPNOTSUPP;
+
+ dev = &app->pdev->dev;
+
+ nsp = nfp_nsp_open(app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ dev_err(dev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ err = request_firmware_direct(&fw, flash->data, dev);
+ if (err)
+ goto exit_close_nsp;
+
+ dev_info(dev, "Please be patient while writing flash image: %s\n",
+ flash->data);
+ dev_hold(netdev);
+ rtnl_unlock();
+
+ err = nfp_nsp_write_flash(nsp, fw);
+ if (err < 0) {
+ dev_err(dev, "Flash write failed: %d\n", err);
+ goto exit_rtnl_lock;
+ }
+ dev_info(dev, "Finished writing flash image\n");
+
+exit_rtnl_lock:
+ rtnl_lock();
+ dev_put(netdev);
+ release_firmware(fw);
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
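
This hooks the standard ethtool flash interface; request_firmware_direct()
resolves the image under the usual firmware search path (typically
/lib/firmware), so flashing would be driven as (image name hypothetical):

    ethtool -f ens4np0 flash-image.nffw

Note the rtnl_unlock()/rtnl_lock() pair around the long-running write:
dropping the RTNL lock keeps the rest of the stack responsive during a flash
that may take minutes, while dev_hold() pins the netdev in the meantime.
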
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1240,6 +1331,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_sset_count = nfp_net_get_sset_count,
.get_rxnfc = nfp_net_get_rxnfc,
.set_rxnfc = nfp_net_set_rxnfc,
+ .flash_device = nfp_net_flash_device,
.get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
@@ -1265,6 +1357,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
.get_sset_count = nfp_port_get_sset_count,
+ .flash_device = nfp_net_flash_device,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index c505014121c4..15fa47f622aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -208,12 +208,6 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
return err;
@@ -373,7 +367,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
+ mutex_lock(&pf->lock);
err = nfp_app_init(pf->app);
+ mutex_unlock(&pf->lock);
if (err)
goto err_free;
@@ -401,7 +397,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
@@ -414,7 +412,11 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
+
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
+
nfp_app_free(pf->app);
pf->app = NULL;
}
@@ -570,17 +572,6 @@ err_unmap_ctrl:
return err;
}
-static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
-{
- nfp_net_pf_app_stop(pf);
- /* stop app first, to avoid double free of ctrl vNIC's ddir */
- nfp_net_debugfs_dir_clean(&pf->ddir);
-
- nfp_net_pf_free_irqs(pf);
- nfp_net_pf_app_clean(pf);
- nfp_net_pci_unmap_mem(pf);
-}
-
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
struct nfp_eth_table *eth_table)
@@ -655,9 +646,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, nn);
}
- if (list_empty(&pf->vnics))
- nfp_net_pci_remove_finish(pf);
-
return 0;
}
@@ -707,6 +695,7 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
int stride;
@@ -720,16 +709,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
return -EINVAL;
}
- mutex_lock(&pf->lock);
pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
- if ((int)pf->max_data_vnics < 0) {
- err = pf->max_data_vnics;
- goto err_unlock;
- }
+ if ((int)pf->max_data_vnics < 0)
+ return pf->max_data_vnics;
err = nfp_net_pci_map_mem(pf);
if (err)
- goto err_unlock;
+ return err;
ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
qc_bar = nfp_cpp_area_iomem(pf->qc_area);
@@ -768,6 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
+ err = devlink_register(devlink, &pf->pdev->dev);
+ if (err)
+ goto err_app_clean;
+
+ mutex_lock(&pf->lock);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -799,32 +790,39 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
+ mutex_unlock(&pf->lock);
+ cancel_work_sync(&pf->port_refresh_work);
+ devlink_unregister(devlink);
+err_app_clean:
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
-err_unlock:
- mutex_unlock(&pf->lock);
- cancel_work_sync(&pf->port_refresh_work);
return err;
}
void nfp_net_pci_remove(struct nfp_pf *pf)
{
- struct nfp_net *nn;
+ struct nfp_net *nn, *next;
mutex_lock(&pf->lock);
- if (list_empty(&pf->vnics))
- goto out;
-
- list_for_each_entry(nn, &pf->vnics, vnic_list)
- if (nfp_net_is_data_vnic(nn))
- nfp_net_pf_clean_vnic(pf, nn);
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+ if (!nfp_net_is_data_vnic(nn))
+ continue;
+ nfp_net_pf_clean_vnic(pf, nn);
+ nfp_net_pf_free_vnic(pf, nn);
+ }
- nfp_net_pf_free_vnics(pf);
+ nfp_net_pf_app_stop(pf);
+ /* stop app first, to avoid double free of ctrl vNIC's ddir */
+ nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_net_pci_remove_finish(pf);
-out:
mutex_unlock(&pf->lock);
+ devlink_unregister(priv_to_devlink(pf));
+
+ nfp_net_pf_free_irqs(pf);
+ nfp_net_pf_app_clean(pf);
+ nfp_net_pci_unmap_mem(pf);
+
cancel_work_sync(&pf->port_refresh_work);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 78b36c67c232..f67da6bde9da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -46,6 +46,13 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
+{
+ return rcu_dereference_protected(set->reprs[id],
+ lockdep_is_held(&app->pf->lock));
+}
+
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
int tx_status)
@@ -186,6 +193,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
return -EINVAL;
}
+static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+}
+
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +254,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
+ .ndo_change_mtu = nfp_repr_change_mtu,
.ndo_get_stats64 = nfp_repr_get_stats64,
.ndo_has_offload_stats = nfp_repr_has_offload_stats,
.ndo_get_offload_stats = nfp_repr_get_offload_stats,
@@ -336,6 +351,8 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app)
if (!netdev)
return NULL;
+ netif_carrier_off(netdev);
+
repr = netdev_priv(netdev);
repr->netdev = netdev;
repr->app = app;
@@ -359,29 +376,45 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr)
nfp_repr_free(repr);
}
-void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
+ struct net_device *netdev;
unsigned int i;
- for (i = 0; i < reprs->num_reprs; i++)
- if (reprs->reprs[i])
- nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_repr_clean_and_free(netdev_priv(netdev));
+ }
kfree(reprs);
}
void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type)
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
+ struct net_device *netdev;
struct nfp_reprs *reprs;
+ int i;
- reprs = nfp_app_reprs_set(app, type, NULL);
+ reprs = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
if (!reprs)
return;
+ /* Preclean must happen before we remove the reprs reference from the
+ * app below.
+ */
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_app_repr_preclean(app, netdev);
+ }
+
+ reprs = nfp_app_reprs_set(app, type, NULL);
+
synchronize_rcu();
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
}
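
The teardown above follows the canonical RCU unpublish-then-free sequence.
Stripped of the driver specifics, the shape is (hypothetical names):

    rcu_assign_pointer(parent->obj, NULL);	/* readers now see NULL */
    synchronize_rcu();				/* wait out in-flight readers */
    kfree(obj);					/* no reader can still hold it */
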
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
@@ -399,47 +432,29 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
- struct nfp_reprs *reprs, *old_reprs;
+ struct net_device *netdev;
+ struct nfp_reprs *reprs;
struct nfp_repr *repr;
int i;
- old_reprs =
- rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT],
- lockdep_is_held(&app->pf->lock));
- if (!old_reprs)
- return 0;
-
- reprs = nfp_reprs_alloc(old_reprs->num_reprs);
+ reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
if (!reprs)
- return -ENOMEM;
-
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
- continue;
-
- repr = netdev_priv(old_reprs->reprs[i]);
- if (repr->port->type == NFP_PORT_INVALID)
- continue;
-
- reprs->reprs[i] = old_reprs->reprs[i];
- }
-
- old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- synchronize_rcu();
+ return 0;
- /* Now we free up removed representors */
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (!netdev)
continue;
- repr = netdev_priv(old_reprs->reprs[i]);
+ repr = netdev_priv(netdev);
if (repr->port->type != NFP_PORT_INVALID)
continue;
- nfp_app_repr_stop(app, repr);
+ nfp_app_repr_preclean(app, netdev);
+ rcu_assign_pointer(reprs->reprs[i], NULL);
+ synchronize_rcu();
nfp_repr_clean(repr);
}
- kfree(old_reprs);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 5d4d897bc9c6..a621e8ff528e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -35,6 +35,7 @@
#define NFP_NET_REPR_H
struct metadata_dst;
+struct nfp_app;
struct nfp_net;
struct nfp_port;
@@ -47,7 +48,7 @@ struct nfp_port;
*/
struct nfp_reprs {
unsigned int num_reprs;
- struct net_device *reprs[0];
+ struct net_device __rcu *reprs[0];
};
/**
@@ -89,6 +90,7 @@ struct nfp_repr {
* @NFP_REPR_TYPE_PHYS_PORT: external NIC port
* @NFP_REPR_TYPE_PF: physical function
* @NFP_REPR_TYPE_VF: virtual function
+ * @__NFP_REPR_TYPE_MAX: number of representor types
*/
enum nfp_repr_type {
NFP_REPR_TYPE_PHYS_PORT,
@@ -113,16 +115,18 @@ static inline int nfp_repr_get_port_id(struct net_device *netdev)
return priv->dst->u.port_info.port_id;
}
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set,
+ unsigned int id);
+
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
-void
-nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
-void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type);
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
+void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+ enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
int nfp_reprs_resync_phys_ports(struct nfp_app *app);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c879626e035b..b802a1d55449 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -277,12 +277,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
goto err_irqs_disable;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 3ce51f03126f..ced62d112aa2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -49,6 +49,8 @@
struct nfp_hwinfo;
struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo);
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo);
/* Implemented in nfp_nsp.c, low level functions */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 5798adc57cbc..c8f2c064cce3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -242,6 +242,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
void *buffer, size_t length);
int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
const void *buffer, size_t length);
+size_t nfp_cpp_area_size(struct nfp_cpp_area *area);
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 04dd5758ecf5..ef30597aa319 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -372,8 +372,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
* that it can be accessed directly.
*
* NOTE: @address and @size must be 32-bit aligned values.
- *
- * NOTE: The area must also be 'released' when the structure is freed.
+ * The area must also be 'released' when the structure is freed.
*
* Return: NFP CPP Area handle, or NULL
*/
@@ -536,8 +535,7 @@ void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
* Read data from indicated CPP region.
*
* NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
*
* Return: length of io, or -ERRNO
*/
@@ -558,8 +556,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area,
* Write data to indicated CPP region.
*
* NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
*
* Return: length of io, or -ERRNO
*/
@@ -571,6 +568,17 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
}
/**
+ * nfp_cpp_area_size() - return size of a CPP area
+ * @cpp_area: CPP area handle
+ *
+ * Return: Size of the area
+ */
+size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->size;
+}
+
+/**
* nfp_cpp_area_name() - return name of a CPP area
* @cpp_area: CPP area handle
*
@@ -666,18 +674,20 @@ void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readl(struct nfp_cpp_area *area,
unsigned long offset, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -686,16 +696,18 @@ int nfp_cpp_area_readl(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writel(struct nfp_cpp_area *area,
unsigned long offset, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -704,18 +716,20 @@ int nfp_cpp_area_writel(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readq(struct nfp_cpp_area *area,
unsigned long offset, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -724,16 +738,18 @@ int nfp_cpp_area_readq(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
unsigned long offset, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -1072,7 +1088,7 @@ static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
* @xpb_addr: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
{
@@ -1087,7 +1103,7 @@ int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
* @xpb_addr: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
{
@@ -1105,7 +1121,7 @@ int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
*
* KERNEL: This operation is safe to call in interrupt or softirq context.
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
u32 mask, u32 value)
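
The readl/writel/readq/writeq helpers in this file, and the nfp_cpp_*
variants in nfp_cpplib.c below, switch from returning the I/O length to a
plain 0 or -ERRNO. A hypothetical call site goes from checking the byte
count to the usual error idiom:

    u32 val;
    int err;

    err = nfp_cpp_area_readl(area, offset, &val);
    if (err)	/* previously: err != sizeof(val) meant failure */
    	return err;
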
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index ab86bceb93f2..20bad05e2e92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -64,18 +64,20 @@
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -85,15 +87,18 @@ int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -103,18 +108,20 @@ int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -124,15 +131,18 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/* NOTE: This code should not use nfp_xpb_* functions,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 4f24aff1e772..063a9a6243d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -302,3 +302,13 @@ const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
return NULL;
}
+
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo)
+{
+ return hwinfo->data;
+}
+
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo)
+{
+ return le32_to_cpu(hwinfo->size) - sizeof(u32);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 14a6d1ba51a9..39abac678b71 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -51,6 +51,9 @@
#include "nfp_cpp.h"
#include "nfp_nsp.h"
+#define NFP_NSP_TIMEOUT_DEFAULT 30
+#define NFP_NSP_TIMEOUT_BOOT 30
+
/* Offsets relative to the CSR base */
#define NSP_STATUS 0x00
#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
@@ -93,6 +96,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */
SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
};
@@ -260,10 +264,10 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
}
static int
-nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
- u32 nsp_cpp, u64 addr, u64 mask, u64 val)
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
+ u64 mask, u64 val, u32 timeout_sec)
{
- const unsigned long wait_until = jiffies + 30 * HZ;
+ const unsigned long wait_until = jiffies + timeout_sec * HZ;
int err;
for (;;) {
@@ -285,12 +289,13 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
}
/**
- * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * __nfp_nsp_command() - Execute a command on the NFP Service Processor
* @state: NFP SP state
* @code: NFP SP Command Code
* @option: NFP SP Command Argument
* @buff_cpp: NFP SP Buffer CPP Address info
* @buff_addr: NFP SP Buffer Host address
* @timeout_sec: Timeout value to wait for completion in seconds
*
* Return: 0 for success with no result
*
@@ -300,10 +305,11 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
* -ENODEV if the NSP is not a supported model
* -EBUSY if the NSP is stuck
* -EINTR if interrupted while waiting for completion
- * -ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ * -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete
*/
-static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
- u32 buff_cpp, u64 buff_addr)
+static int
+__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+ u64 buff_addr, u32 timeout_sec)
{
u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
struct nfp_cpp *cpp = state->cpp;
@@ -341,8 +347,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return err;
/* Wait for NSP_COMMAND_START to go to 0 */
- err = nfp_nsp_wait_reg(cpp, &reg,
- nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+ NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
err, code);
@@ -350,8 +356,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
}
/* Wait for NSP_STATUS_BUSY to go to 0 */
- err = nfp_nsp_wait_reg(cpp, &reg,
- nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+ 0, timeout_sec);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
err, code);
@@ -374,9 +380,18 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return ret_val;
}
-static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
- const void *in_buf, unsigned int in_size,
- void *out_buf, unsigned int out_size)
+static int
+nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+ u64 buff_addr)
+{
+ return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr,
+ NFP_NSP_TIMEOUT_DEFAULT);
+}
+
+static int
+__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size, u32 timeout_sec)
{
struct nfp_cpp *cpp = nsp->cpp;
unsigned int max_size;
@@ -429,7 +444,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
return err;
}
- ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+ ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf,
+ timeout_sec);
if (ret < 0)
return ret;
@@ -442,12 +458,23 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
return ret;
}
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size)
+{
+ return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size,
+ out_buf, out_size,
+ NFP_NSP_TIMEOUT_DEFAULT);
+}
+
int nfp_nsp_wait(struct nfp_nsp *state)
{
- const unsigned long wait_until = jiffies + 30 * HZ;
+ const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
int err;
- nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
+ nfp_dbg(state->cpp, "Waiting for NSP to respond (%u sec max).\n",
+ NFP_NSP_TIMEOUT_BOOT);
for (;;) {
const unsigned long start_time = jiffies;
@@ -488,6 +515,17 @@ int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
fw->size, NULL, 0);
}
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
+{
+ /* Flashing is specified to take a maximum of 70s, so we add a safety
+ * factor to that specified time.
+ */
+ u32 timeout_sec = 2.5 * 70;
+
+ return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size,
+ fw->data, fw->size, NULL, 0, timeout_sec);
+}
+
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
{
return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 650ca1a5bd21..e983c9d7f86c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -48,6 +48,7 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
int nfp_nsp_wait(struct nfp_nsp *state);
int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index ecda474ac7c3..46107aefad1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -277,10 +277,6 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
break;
}
- if (err == sym->size)
- err = 0;
- else if (err >= 0)
- err = -EIO;
exit:
if (error)
*error = err;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 481876b5424c..66c665d0b926 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -793,9 +793,9 @@ struct fe_priv {
/* rx specific fields.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
- union ring_type get_rx, put_rx, first_rx, last_rx;
+ union ring_type get_rx, put_rx, last_rx;
struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
- struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+ struct nv_skb_map *last_rx_ctx;
struct nv_skb_map *rx_skb;
union ring_type rx_ring;
@@ -822,9 +822,9 @@ struct fe_priv {
/*
* tx specific fields.
*/
- union ring_type get_tx, put_tx, first_tx, last_tx;
+ union ring_type get_tx, put_tx, last_tx;
struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
- struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+ struct nv_skb_map *last_tx_ctx;
struct nv_skb_map *tx_skb;
union ring_type tx_ring;
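
The forcedeth change drops the redundant first_rx/first_tx cursors: the ring
bases (np->rx_ring, np->tx_ring, np->rx_skb, np->tx_skb) already mark the
first element. The wrap-around idiom simply substitutes them (generic
sketch, hypothetical names):

    /* advance a ring cursor and wrap to the ring base after the last
     * descriptor; previously the wrap target was the first_* alias
     */
    if (unlikely(cursor++ == last_desc))
    	cursor = ring_base;
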
@@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev)
struct ring_desc *less_rx;
less_rx = np->get_rx.orig;
- if (less_rx-- == np->first_rx.orig)
+ if (less_rx-- == np->rx_ring.orig)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (skb) {
+ if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
@@ -1833,9 +1833,9 @@ static int nv_alloc_rx(struct net_device *dev)
wmb();
np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
- np->put_rx.orig = np->first_rx.orig;
+ np->put_rx.orig = np->rx_ring.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
- np->put_rx_ctx = np->first_rx_ctx;
+ np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
struct ring_desc_ex *less_rx;
less_rx = np->get_rx.ex;
- if (less_rx-- == np->first_rx.ex)
+ if (less_rx-- == np->rx_ring.ex)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (skb) {
+ if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
@@ -1875,9 +1875,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
wmb();
np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
- np->put_rx.ex = np->first_rx.ex;
+ np->put_rx.ex = np->rx_ring.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
- np->put_rx_ctx = np->first_rx_ctx;
+ np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1903,13 +1903,15 @@ static void nv_init_rx(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int i;
- np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+ np->get_rx = np->rx_ring;
+ np->put_rx = np->rx_ring;
if (!nv_optimized(np))
np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
else
np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
- np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+ np->get_rx_ctx = np->rx_skb;
+ np->put_rx_ctx = np->rx_skb;
np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
for (i = 0; i < np->rx_ring_size; i++) {
@@ -1932,13 +1934,15 @@ static void nv_init_tx(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int i;
- np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+ np->get_tx = np->tx_ring;
+ np->put_tx = np->tx_ring;
if (!nv_optimized(np))
np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
else
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
- np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+ np->get_tx_ctx = np->tx_skb;
+ np->put_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
@@ -2248,9 +2252,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
- put_tx = np->first_tx.orig;
+ put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
@@ -2276,7 +2280,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
- tmp_tx_ctx = np->first_tx_ctx;
+ tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
@@ -2294,18 +2298,18 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
- put_tx = np->first_tx.orig;
+ put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
- if (unlikely(put_tx == np->first_tx.orig))
+ if (unlikely(put_tx == np->tx_ring.orig))
prev_tx = np->last_tx.orig;
else
prev_tx = put_tx - 1;
- if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2406,9 +2410,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
- put_tx = np->first_tx.ex;
+ put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
@@ -2434,7 +2438,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
- tmp_tx_ctx = np->first_tx_ctx;
+ tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
@@ -2452,18 +2456,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
- put_tx = np->first_tx.ex;
+ put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
- if (unlikely(put_tx == np->first_tx.ex))
+ if (unlikely(put_tx == np->tx_ring.ex))
prev_tx = np->last_tx.ex;
else
prev_tx = put_tx - 1;
- if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2563,7 +2567,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
- if (flags & NV_TX_ERROR) {
+ if (unlikely(flags & NV_TX_ERROR)) {
if ((flags & NV_TX_RETRYERROR)
&& !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
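The error branches in nv_tx_done() and nv_tx_done_optimized() gain unlikely(), which in the kernel expands to __builtin_expect(!!(x), 0) so the compiler lays the error path out of line and keeps the no-error path hot. A self-contained sketch of the effect (names illustrative):

	#define unlikely(x)	__builtin_expect(!!(x), 0)

	static int tx_check(unsigned int flags, unsigned int err_mask)
	{
		if (unlikely(flags & err_mask))
			return -1;	/* cold path, moved out of line */
		return 0;		/* hot fall-through path */
	}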
@@ -2580,7 +2584,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
}
} else {
if (flags & NV_TX2_LASTPACKET) {
- if (flags & NV_TX2_ERROR) {
+ if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
@@ -2597,9 +2601,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
}
}
if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
- np->get_tx.orig = np->first_tx.orig;
+ np->get_tx.orig = np->tx_ring.orig;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
- np->get_tx_ctx = np->first_tx_ctx;
+ np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2626,7 +2630,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
- if (flags & NV_TX2_ERROR) {
+ if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
@@ -2651,9 +2655,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
}
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
- np->get_tx.ex = np->first_tx.ex;
+ np->get_tx.ex = np->tx_ring.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
- np->get_tx_ctx = np->first_tx_ctx;
+ np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
@@ -2909,9 +2913,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
- np->get_rx.orig = np->first_rx.orig;
+ np->get_rx.orig = np->rx_ring.orig;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
- np->get_rx_ctx = np->first_rx_ctx;
+ np->get_rx_ctx = np->rx_skb;
rx_work++;
}
@@ -2998,9 +3002,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
next_pkt:
if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
- np->get_rx.ex = np->first_rx.ex;
+ np->get_rx.ex = np->rx_ring.ex;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
- np->get_rx_ctx = np->first_rx_ctx;
+ np->get_rx_ctx = np->rx_skb;
rx_work++;
}
@@ -5507,11 +5511,9 @@ static int nv_open(struct net_device *dev)
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
*/
- {
- u32 miistat;
- miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
- }
+ readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
/* set linkspeed to invalid value, thus force nv_update_linkspeed
* to init hw */
np->linkspeed = 0;
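In nv_open() the miistat local was write-only; only the MMIO read (presumably kept for its device side effect) and the acknowledging write matter, so the block collapses to the bare read-then-ack idiom:

	(void)readl(base + NvRegMIIStatus);	/* value discarded on purpose */
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);	/* ack all bits */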
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c9a55b774935..07a2eb3781b1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return -ENOENT;
}
- if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
- &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
- != ETH_ALEN) {
+ if (!mac_pton(maddr, addr)) {
dev_warn(&pdev->dev,
"can't parse mac address, not configuring\n");
return -EINVAL;
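mac_pton() (bool mac_pton(const char *s, u8 *mac), from lib/net_utils.c) replaces the open-coded sscanf() parse and additionally rejects strings of the wrong length. A usage sketch:

	#include <linux/etherdevice.h>
	#include <linux/kernel.h>	/* mac_pton() declaration at the time */

	static bool parse_example(u8 addr[ETH_ALEN])
	{
		/* accepts only the full "xx:xx:xx:xx:xx:xx" form */
		return mac_pton("00:11:22:33:44:55", addr);
	}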
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 26ddf092e3ec..0ee2490db729 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -85,6 +85,7 @@ config QED
tristate "QLogic QED 25/40/100Gb core driver"
depends on PCI
select ZLIB_INFLATE
+ select CRC8
---help---
This enables the support for ...
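select CRC8 builds lib/crc8.c whenever QED is enabled, presumably for integrity checks in the new firmware interface. A hedged sketch of the lib/crc8 API this pulls in (table name hypothetical):

	#include <linux/crc8.h>

	static u8 crc_table[CRC8_TABLE_SIZE];

	static u8 crc8_example(u8 *buf, size_t len)
	{
		/* one-time table for polynomial x^8 + x^2 + x + 1 */
		crc8_populate_msb(crc_table, 0x07);
		return crc8(crc_table, buf, len, CRC8_INIT_VALUE);
	}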
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 0a66389c06c2..1cd39c9a0345 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -2502,12 +2502,10 @@ netxen_collect_minidump(struct netxen_adapter *adapter)
{
int ret = 0;
struct netxen_minidump_template_hdr *hdr;
- struct timespec val;
hdr = (struct netxen_minidump_template_hdr *)
adapter->mdump.md_template;
hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
- jiffies_to_timespec(jiffies, &val);
- hdr->driver_timestamp = (u32) val.tv_sec;
+ hdr->driver_timestamp = ktime_get_seconds();
hdr->driver_info_word2 = adapter->fw_version;
hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
ret = netxen_parse_md_template(adapter);
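jiffies_to_timespec() with a 32-bit tv_sec is one of the interfaces the y2038 work retires; ktime_get_seconds() returns monotonic seconds as a time64_t, so the store into the u32 firmware field now truncates implicitly at a single site. The explicit form would be:

	/* time64_t -> u32: harmless truncation for a dump timestamp */
	hdr->driver_timestamp = (u32)ktime_get_seconds();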
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 91003bc6f00b..69488554f4b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -52,10 +52,10 @@
extern const struct qed_common_ops qed_common_ops_pass;
-#define QED_MAJOR_VERSION 8
-#define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 11
-#define QED_ENGINEERING_VERSION 21
+#define QED_MAJOR_VERSION 8
+#define QED_MINOR_VERSION 33
+#define QED_REVISION_VERSION 0
+#define QED_ENGINEERING_VERSION 20
#define QED_VERSION \
((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
return sw_fid;
}
-#define PURE_LB_TC 8
-#define PKT_LB_TC 9
+#define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
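Assuming the elided tail of QED_VERSION continues (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION, matching the shifts shown, the bump to 8.33.0.20 packs one byte per component:

	/* (8 << 24) | (33 << 16) | (0 << 8) | 20
	 *   = 0x08000000 | 0x00210000 | 0x00 | 0x14
	 *   = 0x08210014   -> "8.33.0.20" */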
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index afd07ad91631..6f546e869d8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -86,22 +86,22 @@
/* connection context union */
union conn_context {
- struct core_conn_context core_ctx;
- struct eth_conn_context eth_ctx;
- struct iscsi_conn_context iscsi_ctx;
- struct fcoe_conn_context fcoe_ctx;
- struct roce_conn_context roce_ctx;
+ struct e4_core_conn_context core_ctx;
+ struct e4_eth_conn_context eth_ctx;
+ struct e4_iscsi_conn_context iscsi_ctx;
+ struct e4_fcoe_conn_context fcoe_ctx;
+ struct e4_roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
- struct iscsi_task_context iscsi_ctx;
- struct fcoe_task_context fcoe_ctx;
+ struct e4_iscsi_task_context iscsi_ctx;
+ struct e4_fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
- struct rdma_task_context roce_ctx;
+ struct e4_rdma_task_context roce_ctx;
};
struct src_ent {
@@ -109,8 +109,8 @@ struct src_ent {
u64 next;
};
-#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
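The BIT() form is equivalent to the old open-coded shift (BIT(nr) is 1UL << (nr)); with CDUT_SEG_ALIGNMET = 3 "in 4k chunks" the value works out as:

	/* BIT(3 + 12) = 1 << 15 = 32768 bytes = 8 * 4096,
	 * i.e. CDUT segments align to eight 4 KiB chunks */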
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_cxt_qm_iids(p_hwfn, &qm_iids);
- total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ total = qed_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
@@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 size;
size = min_t(u32, sz_left, p_blk->real_size_in_page);
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size, &p_phys, GFP_KERNEL);
+ p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
- memset(p_virt, 0, size);
ilt_shadow[line].p_phys = p_phys;
ilt_shadow[line].p_virt = p_virt;
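dma_zalloc_coherent() folds the allocate-then-memset pair into one call; roughly (sketch, not the kernel's exact implementation):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	static inline void *dma_zalloc_coherent_sketch(struct device *dev,
						       size_t size,
						       dma_addr_t *handle,
						       gfp_t gfp)
	{
		void *p = dma_alloc_coherent(dev, size, handle, gfp);

		if (p)
			memset(p, 0, size);
		return p;
	}

The same substitution appears again in qed_cxt_dynamic_ilt_alloc() below.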
@@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
}
}
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading)
{
- struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_mcp_link_state *p_link;
struct qed_qm_iids iids;
memset(&iids, 0, sizeof(iids));
qed_cxt_qm_iids(p_hwfn, &iids);
+ p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
memset(&params, 0, sizeof(params));
params.port_id = p_hwfn->port_id;
params.pf_id = p_hwfn->rel_pf_id;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
- params.is_first_pf = p_hwfn->first_on_engine;
+ params.is_pf_loading = is_pf_loading;
params.num_pf_cids = iids.cids;
params.num_vf_cids = iids.vf_cids;
params.num_tids = iids.tids;
@@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
+ params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
@@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- qed_qm_init_pf(p_hwfn, p_ptt);
+ qed_qm_init_pf(p_hwfn, p_ptt, true);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn);
@@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
goto out0;
}
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_blk->real_size_in_page,
- &p_phys, GFP_KERNEL);
+ p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page, &p_phys,
+ GFP_KERNEL);
if (!p_virt) {
rc = -ENOMEM;
goto out1;
}
- memset(p_virt, 0, p_blk->real_size_in_page);
/* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
* to compensate for a HW bug, but it is configured even if DIF is not
@@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
elem = (union type1_task_context *)elem_start;
SET_FIELD(elem->roce_ctx.tdif_context.flags1,
- TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+ TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 17836349a274..a4e95869889f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*
* @param p_hwfn
* @param p_ptt
+ * @param is_pf_loading
*/
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index fe7c1f230028..449777f21237 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest)
{
struct protocol_dcb_data *p_dcb_data;
- bool update_flag = false;
-
- p_dest->pf_id = p_src->pf_id;
+ u8 update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
p_dest->update_fcoe_dcb_data_mode = update_flag;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 03c3cf77aaff..f2633ec87a6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -21,25 +21,26 @@ enum mem_groups {
MEM_GROUP_DMAE_MEM,
MEM_GROUP_CM_MEM,
MEM_GROUP_QM_MEM,
- MEM_GROUP_TM_MEM,
+ MEM_GROUP_DORQ_MEM,
MEM_GROUP_BRB_RAM,
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
- MEM_GROUP_SDM_MEM,
MEM_GROUP_IOR,
- MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
- MEM_GROUP_RDIF_CTX,
- MEM_GROUP_TDIF_CTX,
- MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
MEM_GROUP_PXP_ILT,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_SDM_MEM,
MEM_GROUP_PBUF,
+ MEM_GROUP_RAM,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_IGU_MEM,
MEM_GROUP_IGU_MSIX,
MEM_GROUP_CAU_SB,
@@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = {
"DMAE_MEM",
"CM_MEM",
"QM_MEM",
- "TM_MEM",
+ "DORQ_MEM",
"BRB_RAM",
"BRB_MEM",
"PRS_MEM",
- "SDM_MEM",
"IOR",
- "RAM",
"BTB_RAM",
- "RDIF_CTX",
- "TDIF_CTX",
- "CFC_MEM",
"CONN_CFC_MEM",
"TASK_CFC_MEM",
"CAU_PI",
"CAU_MEM",
"PXP_ILT",
+ "TM_MEM",
+ "SDM_MEM",
"PBUF",
+ "RAM",
"MULD_MEM",
"BTB_MEM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CFC_MEM",
"IGU_MEM",
"IGU_MSIX",
"CAU_SB",
@@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm)
return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}
-static u32 cond14(const u32 *r, const u32 *imm)
-{
- return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
-}
-
static u32 cond6(const u32 *r, const u32 *imm)
{
return (r[0] & imm[0]) != imm[1];
@@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond11,
cond12,
cond13,
- cond14,
};
/******************************* Data Types **********************************/
@@ -203,6 +199,8 @@ struct chip_defs {
struct platform_defs {
const char *name;
u32 delay_factor;
+ u32 dmae_thresh;
+ u32 log_thresh;
};
/* Storm constant definitions.
@@ -234,7 +232,7 @@ struct storm_defs {
/* Block constant definitions */
struct block_defs {
const char *name;
- bool has_dbg_bus[MAX_CHIP_IDS];
+ bool exists[MAX_CHIP_IDS];
bool associated_to_storm;
/* Valid only if associated_to_storm is true */
@@ -258,8 +256,8 @@ struct block_defs {
/* Reset register definitions */
struct reset_reg_defs {
u32 addr;
- u32 unreset_val;
bool exists[MAX_CHIP_IDS];
+ u32 unreset_val[MAX_CHIP_IDS];
};
struct grc_param_defs {
@@ -276,8 +274,8 @@ struct rss_mem_defs {
const char *mem_name;
const char *type_name;
u32 addr;
+ u32 entry_width;
u32 num_entries[MAX_CHIP_IDS];
- u32 entry_width[MAX_CHIP_IDS];
};
struct vfc_ram_defs {
@@ -294,7 +292,9 @@ struct big_ram_defs {
enum dbg_grc_params grc_param;
u32 addr_reg_addr;
u32 data_reg_addr;
- u32 num_of_blocks[MAX_CHIP_IDS];
+ u32 is_256b_reg_addr;
+ u32 is_256b_bit_offset[MAX_CHIP_IDS];
+ u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};
struct phy_defs {
@@ -358,20 +358,14 @@ struct phy_defs {
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
-#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
-#endif
-#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-#endif
-/* extra lines include a signature line + optional latency events line */
-#ifndef NUM_DBG_LINES
+/* Extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block_desc) \
(1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
-#endif
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -424,9 +418,6 @@ struct phy_defs {
#define NUM_RSS_MEM_TYPES 5
#define NUM_BIG_RAM_TYPES 3
-#define BIG_RAM_BLOCK_SIZE_BYTES 128
-#define BIG_RAM_BLOCK_SIZE_DWORDS \
- BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
@@ -441,23 +432,17 @@ struct phy_defs {
#define FW_IMG_MAIN 1
-#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
-#endif
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
-#endif
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
-#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
{{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
{0, 0, 0},
{0, 0, 0},
+ {0, 0, 0} } },
+ { "reserved",
+ {{0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0},
{0, 0, 0} } }
};
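The extra "reserved" row means MAX_CHIP_IDS grows from two to three: BB, K2, and (judging by the _E5 register suffixes used throughout this file) the new E5 ASIC. Every per-chip array in the hunks below gains the matching third column. Presumed shape of the enumeration:

	enum chip_ids {
		CHIP_BB,
		CHIP_K2,
		CHIP_E5,	/* new in this series */
		MAX_CHIP_IDS
	};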
@@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = {
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = {
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = {
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = {
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = {
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = {
static struct block_defs block_grc_defs = {
"grc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
@@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = {
};
static struct block_defs block_miscs_defs = {
- "miscs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "miscs", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_defs = {
- "misc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbu_defs = {
- "dbu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_pglue_b_defs = {
"pglue_b",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = {
static struct block_defs block_cnig_defs = {
"cnig",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
- CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
- CNIG_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCW},
+ CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
+ CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
+ CNIG_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 0
};
static struct block_defs block_cpmu_defs = {
- "cpmu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "cpmu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};
static struct block_defs block_ncsi_defs = {
"ncsi",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
@@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = {
};
static struct block_defs block_opte_defs = {
- "opte", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "opte", {true, true, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};
static struct block_defs block_bmb_defs = {
"bmb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
@@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = {
static struct block_defs block_pcie_defs = {
"pcie",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
- PCIE_REG_DBG_COMMON_SHIFT_K2,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+ PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp_defs = {
- "mcp", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "mcp", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp2_defs = {
"mcp2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
@@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = {
static struct block_defs block_pswhst_defs = {
"pswhst",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
@@ -703,8 +701,8 @@ static struct block_defs block_pswhst_defs = {
static struct block_defs block_pswhst2_defs = {
"pswhst2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
@@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = {
static struct block_defs block_pswrd_defs = {
"pswrd",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
@@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = {
static struct block_defs block_pswrd2_defs = {
"pswrd2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
@@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = {
static struct block_defs block_pswwr_defs = {
"pswwr",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
@@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = {
};
static struct block_defs block_pswwr2_defs = {
- "pswwr2", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "pswwr2", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISC_PL_HV, 3
};
static struct block_defs block_pswrq_defs = {
"pswrq",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
@@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = {
static struct block_defs block_pswrq2_defs = {
"pswrq2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = {
static struct block_defs block_pglcs_defs = {
"pglcs",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
- PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
- PGLCS_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
+ PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
+ PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};
static struct block_defs block_ptu_defs = {
"ptu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
@@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = {
static struct block_defs block_dmae_defs = {
"dmae",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
@@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = {
static struct block_defs block_tcm_defs = {
"tcm",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
@@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = {
static struct block_defs block_mcm_defs = {
"mcm",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
@@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = {
static struct block_defs block_ucm_defs = {
"ucm",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
@@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = {
static struct block_defs block_xcm_defs = {
"xcm",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
@@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = {
static struct block_defs block_ycm_defs = {
"ycm",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
@@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = {
static struct block_defs block_pcm_defs = {
"pcm",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
@@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = {
static struct block_defs block_qm_defs = {
"qm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
@@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = {
static struct block_defs block_tm_defs = {
"tm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
@@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = {
static struct block_defs block_dorq_defs = {
"dorq",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
@@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = {
static struct block_defs block_brb_defs = {
"brb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
@@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = {
static struct block_defs block_src_defs = {
"src",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
@@ -910,8 +909,8 @@ static struct block_defs block_src_defs = {
static struct block_defs block_prs_defs = {
"prs",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
@@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = {
static struct block_defs block_tsdm_defs = {
"tsdm",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
@@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = {
static struct block_defs block_msdm_defs = {
"msdm",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
@@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = {
static struct block_defs block_usdm_defs = {
"usdm",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
@@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = {
static struct block_defs block_xsdm_defs = {
"xsdm",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
@@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = {
static struct block_defs block_ysdm_defs = {
"ysdm",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
@@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = {
static struct block_defs block_psdm_defs = {
"psdm",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
@@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = {
static struct block_defs block_tsem_defs = {
"tsem",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
@@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = {
static struct block_defs block_msem_defs = {
"msem",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
@@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = {
static struct block_defs block_usem_defs = {
"usem",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
@@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = {
static struct block_defs block_xsem_defs = {
"xsem",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
@@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = {
static struct block_defs block_ysem_defs = {
"ysem",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
@@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = {
static struct block_defs block_psem_defs = {
"psem",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
@@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = {
static struct block_defs block_rss_defs = {
"rss",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
@@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = {
static struct block_defs block_tmld_defs = {
"tmld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
@@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = {
static struct block_defs block_muld_defs = {
"muld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
@@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = {
static struct block_defs block_yuld_defs = {
"yuld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, false}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ MAX_DBG_BUS_CLIENTS},
YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
YULD_REG_DBG_FORCE_FRAME_BB_K2,
@@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = {
static struct block_defs block_xyld_defs = {
"xyld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};
+static struct block_defs block_ptld_defs = {
+ "ptld",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
+ PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
+ PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
+ PTLD_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 28
+};
+
+static struct block_defs block_ypld_defs = {
+ "ypld",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
+ YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
+ YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
+ YPLD_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 27
+};
+
static struct block_defs block_prm_defs = {
"prm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
PRM_REG_DBG_FORCE_FRAME,
@@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = {
static struct block_defs block_pbf_pb1_defs = {
"pbf_pb1",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = {
static struct block_defs block_pbf_pb2_defs = {
"pbf_pb2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = {
static struct block_defs block_rpb_defs = {
"rpb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
RPB_REG_DBG_FORCE_FRAME,
@@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = {
static struct block_defs block_btb_defs = {
"btb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
BTB_REG_DBG_FORCE_FRAME,
@@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = {
static struct block_defs block_pbf_defs = {
"pbf",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
PBF_REG_DBG_FORCE_FRAME,
@@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = {
static struct block_defs block_rdif_defs = {
"rdif",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
RDIF_REG_DBG_FORCE_FRAME,
@@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = {
static struct block_defs block_tdif_defs = {
"tdif",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
TDIF_REG_DBG_FORCE_FRAME,
@@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = {
static struct block_defs block_cdu_defs = {
"cdu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
CDU_REG_DBG_FORCE_FRAME,
@@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = {
static struct block_defs block_ccfc_defs = {
"ccfc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
CCFC_REG_DBG_FORCE_FRAME,
@@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = {
static struct block_defs block_tcfc_defs = {
"tcfc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
TCFC_REG_DBG_FORCE_FRAME,
@@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = {
static struct block_defs block_igu_defs = {
"igu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
IGU_REG_DBG_FORCE_FRAME,
@@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = {
static struct block_defs block_cau_defs = {
"cau",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
CAU_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};
+static struct block_defs block_rgfs_defs = {
+ "rgfs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
+};
+
+static struct block_defs block_rgsrc_defs = {
+ "rgsrc",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
+ RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
+ RGSRC_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 30
+};
+
+static struct block_defs block_tgfs_defs = {
+ "tgfs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
+};
+
+static struct block_defs block_tgsrc_defs = {
+ "tgsrc",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
+ TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
+ TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
+ TGSRC_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 31
+};
+
static struct block_defs block_umac_defs = {
"umac",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
- UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
- UMAC_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_RBCZ},
+ UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
+ UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
+ UMAC_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};
static struct block_defs block_xmac_defs = {
- "xmac", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "xmac", {true, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbg_defs = {
- "dbg", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbg", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};
static struct block_defs block_nig_defs = {
"nig",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
NIG_REG_DBG_FORCE_FRAME,
@@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = {
static struct block_defs block_wol_defs = {
"wol",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
- WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
- WOL_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
+ WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
+ WOL_REG_DBG_FORCE_FRAME_K2_E5,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};
static struct block_defs block_bmbn_defs = {
"bmbn",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
- BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
- BMBN_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCB},
+ BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
+ BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
+ BMBN_REG_DBG_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ipc_defs = {
- "ipc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "ipc", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};
static struct block_defs block_nwm_defs = {
"nwm",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
- NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
- NWM_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+ NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
+ NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
+ NWM_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};
static struct block_defs block_nws_defs = {
"nws",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
- NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
- NWS_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+ NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
+ NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
+ NWS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
static struct block_defs block_ms_defs = {
"ms",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
- MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
- MS_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
+ MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
+ MS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
static struct block_defs block_phy_pcie_defs = {
"phy_pcie",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
- PCIE_REG_DBG_COMMON_SHIFT_K2,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+ PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_led_defs = {
- "led", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "led", {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 14
};
static struct block_defs block_avs_wrap_defs = {
- "avs_wrap", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "avs_wrap", {false, true, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 11
};
-static struct block_defs block_rgfs_defs = {
- "rgfs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_rgsrc_defs = {
- "rgsrc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgfs_defs = {
- "tgfs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgsrc_defs = {
- "tgsrc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ptld_defs = {
- "ptld", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ypld_defs = {
- "ypld", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+static struct block_defs block_pxpreqbus_defs = {
+ "pxpreqbus", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc_aeu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_bar0_map_defs = {
- "bar0_map", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "bar0_map", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_phy_pcie_defs,
&block_led_defs,
&block_avs_wrap_defs,
+ &block_pxpreqbus_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
static struct platform_defs s_platform_defs[] = {
- {"asic", 1},
- {"reserved", 0},
- {"reserved2", 0},
- {"reserved3", 0}
+ {"asic", 1, 256, 32768},
+ {"reserved", 0, 0, 0},
+ {"reserved2", 0, 0, 0},
+ {"reserved3", 0, 0, 0}
};
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PBUF */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_VFC */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_RESERVED */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IGU */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_STATIC */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_UNSTALL */
- {{0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, 0, 0},
/* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
MAX_LCIDS},
/* DBG_GRC_PARAM_NUM_LTIDS */
- {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
MAX_LTIDS},
/* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, 0, 0},
/* DBG_GRC_PARAM_CRASH */
- {{0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, 0, 0},
/* DBG_GRC_PARAM_PARITY_SAFE */
- {{0, 0}, 0, 1, false, 1, 0},
+ {{0, 0, 0}, 0, 1, false, 1, 0},
/* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PHY */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_NO_MCP */
- {{0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, 0, 0},
/* DBG_GRC_PARAM_NO_FW_VER */
- {{0, 0}, 0, 1, false, 0, 0}
+ {{0, 0, 0}, 0, 1, false, 0, 0}
};
static struct rss_mem_defs s_rss_mem_defs[] = {
- { "rss_mem_cid", "rss_cid", 0,
- {256, 320},
- {32, 32} },
+ { "rss_mem_cid", "rss_cid", 0, 32,
+ {256, 320, 512} },
- { "rss_mem_key_msb", "rss_key", 1024,
- {128, 208},
- {256, 256} },
+ { "rss_mem_key_msb", "rss_key", 1024, 256,
+ {128, 208, 257} },
- { "rss_mem_key_lsb", "rss_key", 2048,
- {128, 208},
- {64, 64} },
+ { "rss_mem_key_lsb", "rss_key", 2048, 64,
+ {128, 208, 257} },
- { "rss_mem_info", "rss_info", 3072,
- {128, 208},
- {16, 16} },
+ { "rss_mem_info", "rss_info", 3072, 16,
+ {128, 208, 256} },
- { "rss_mem_ind", "rss_ind", 4096,
- {16384, 26624},
- {16, 16} }
+ { "rss_mem_ind", "rss_ind", 4096, 16,
+ {16384, 26624, 32768} }
};
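
entry_width collapses from a per-chip pair into one per-memory scalar (in bits), while num_entries gains a third chip column. The dump-size math in qed_grc_dump_rss() below follows directly; checking it against the "rss_mem_ind" row, first chip column:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* "rss_mem_ind", first chip column: 16384 entries of 16 bits */
	uint32_t num_entries = 16384, entry_width = 16;
	uint32_t total_dwords = (num_entries * entry_width) / 32;
	int packed = (entry_width == 16);	/* 16-bit entries get packed */

	printf("total_dwords=%u packed=%d\n", total_dwords, packed);
	return 0;	/* prints: total_dwords=8192 packed=1 */
}
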
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- {4800, 5632} },
+ MISC_REG_BLOCK_256B_EN, {0, 0, 0},
+ {153600, 180224, 282624} },
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- {2880, 3680} },
+ MISC_REG_BLOCK_256B_EN, {0, 1, 1},
+ {92160, 117760, 168960} },
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- {1152, 1152} }
+ MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
+ {36864, 36864, 36864} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
/* DBG_RESET_REG_MISCS_PL_UA */
- { MISCS_REG_RESET_PL_UA, 0x0,
- {true, true} },
+ { MISCS_REG_RESET_PL_UA,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV, 0x0,
- {true, true} },
+ { MISCS_REG_RESET_PL_HV,
+ {true, true, true}, {0x0, 0x400, 0x600} },
/* DBG_RESET_REG_MISCS_PL_HV_2 */
- { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
- {false, true} },
+ { MISCS_REG_RESET_PL_HV_2_K2_E5,
+ {false, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_UA */
- { MISC_REG_RESET_PL_UA, 0x0,
- {true, true} },
+ { MISC_REG_RESET_PL_UA,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_HV */
- { MISC_REG_RESET_PL_HV, 0x0,
- {true, true} },
+ { MISC_REG_RESET_PL_HV,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
- { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VMAIN_1,
+ {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
- { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VMAIN_2,
+ {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
- { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VAUX,
+ {true, true, true}, {0x2, 0x2, 0x2} },
};
static struct phy_defs s_phy_defs[] = {
{"nw_phy", NWS_REG_NWS_CMU_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
- {"sgmii_phy", MS_REG_MS_CMU_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
- {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
- {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
+ {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};
/**************************** Private Functions ******************************/
@@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
/* Initializes the GRC parameters */
qed_dbg_grc_init_params(p_hwfn);
- dev_data->initialized = true;
+ dev_data->use_dmae = true;
+ dev_data->num_regs_read = 0;
+ dev_data->initialized = 1;
return DBG_STATUS_OK;
}
@@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
* The address is located in the last line of the Storm RAM.
*/
addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
sizeof(fw_info_location);
dest = (u32 *)&fw_info_location;
@@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
/* Writes the "last" section (including CRC) to the specified buffer at the
* given offset. Returns the dumped size in dwords.
*/
-static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
- u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
u32 start_offset = offset;
@@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_CFC_MEM:
case MEM_GROUP_CONN_CFC_MEM:
case MEM_GROUP_TASK_CFC_MEM:
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
case MEM_GROUP_IGU_MEM:
case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
struct block_defs *block = s_block_defs[block_id];
- if (block->has_reset_bit && block->unreset)
+ if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
+ block->unreset)
reg_val[block->reset_reg] |=
BIT(block->reset_bit_offset);
}
@@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
continue;
- reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+ reg_val[i] |=
+ s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
if (reg_val[i])
qed_wr(p_hwfn,
@@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
return offset;
}
+/* Reads the specified registers into the specified buffer.
+ * The addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
+
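
The new helper is the plain GRC path: one register read per dword, with addr and len both in dword units. A standalone sketch with the chip accessor stubbed (qed_rd() is the driver's real read; stub_rd() below only stands in for it):

#include <stdint.h>

#define BYTES_IN_DWORD		4
#define DWORDS_TO_BYTES(dw)	((dw) * BYTES_IN_DWORD)

static uint32_t fake_grc[1024];			/* stand-in for GRC space */

static uint32_t stub_rd(uint32_t byte_addr)	/* stand-in for qed_rd() */
{
	return fake_grc[byte_addr / BYTES_IN_DWORD];
}

/* Same shape as qed_read_regs(): addr and len are in dwords. */
static void read_regs_sketch(uint32_t *buf, uint32_t addr, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i++)
		buf[i] = stub_rd(DWORDS_TO_BYTES(addr + i));
}
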
/* Dumps the GRC registers in the specified address range.
* Returns the dumped size in dwords.
* The addr and len arguments are specified in dwords.
@@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 addr, u32 len, bool wide_bus)
{
- u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
if (!dump)
return len;
- for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ /* Print log if needed */
+ dev_data->num_regs_read += len;
+ if (dev_data->num_regs_read >=
+ s_platform_defs[dev_data->platform_id].log_thresh) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers...\n",
+ dev_data->num_regs_read);
+ dev_data->num_regs_read = 0;
+ }
- return offset;
+ /* Try reading using DMAE */
+ if (dev_data->use_dmae &&
+ (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+ wide_bus)) {
+ if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+ (u64)(uintptr_t)(dump_buf), len, 0))
+ return len;
+ dev_data->use_dmae = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed reading from chip using DMAE, using GRC instead\n");
+ }
+
+ /* Read registers */
+ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+ return len;
}
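
This hunk is the heart of the patch: reads that are long enough (dmae_thresh from the extended platform table) or sit behind a wide bus first try DMAE, and a single DMAE failure clears the per-device use_dmae flag so all later reads take the dword-by-dword GRC path. The control flow, reduced to a sketch with both transports stubbed:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool use_dmae = true;		/* per-device flag in the driver */

/* Stubs standing in for qed_dmae_grc2host() and qed_read_regs(). */
static int dmae_read(uint32_t *buf, uint32_t addr, uint32_t len)
{
	(void)buf; (void)addr; (void)len;
	return -1;			/* pretend DMAE is broken */
}

static void grc_read(uint32_t *buf, uint32_t addr, uint32_t len)
{
	memset(buf, 0, len * 4);	/* would read dword by dword */
	(void)addr;
}

static uint32_t dump_addr_range(uint32_t *buf, uint32_t addr, uint32_t len,
				bool wide_bus, uint32_t dmae_thresh)
{
	if (use_dmae && (len >= dmae_thresh || wide_bus)) {
		if (!dmae_read(buf, addr, len))
			return len;	/* fast path succeeded */
		use_dmae = false;	/* sticky fallback, as in the patch */
	}
	grc_read(buf, addr, len);	/* slow but always available */
	return len;
}
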
/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
chip = &s_chip_defs[dev_data->chip_id];
chip_platform = &chip->per_platform[dev_data->platform_id];
- if (dump)
- DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
-
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
const struct dbg_dump_split_hdr *split_hdr;
@@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "name", buf);
- if (dump)
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers from %s...\n",
- len, buf);
} else {
/* Dump address */
u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
offset += qed_dump_num_param(dump_buf + offset,
dump, "addr", addr_in_bytes);
- if (dump && len > 64)
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers from address 0x%x...\n",
- len, addr_in_bytes);
}
/* Dump len */
@@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
u8 rss_mem_id;
for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
- u32 rss_addr, num_entries, entry_width, total_dwords, i;
+ u32 rss_addr, num_entries, total_dwords;
struct rss_mem_defs *rss_defs;
- u32 addr, size;
+ u32 addr, num_dwords_to_read;
bool packed;
rss_defs = &s_rss_mem_defs[rss_mem_id];
rss_addr = rss_defs->addr;
num_entries = rss_defs->num_entries[dev_data->chip_id];
- entry_width = rss_defs->entry_width[dev_data->chip_id];
- total_dwords = (num_entries * entry_width) / 32;
- packed = (entry_width == 16);
+ total_dwords = (num_entries * rss_defs->entry_width) / 32;
+ packed = (rss_defs->entry_width == 16);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
rss_defs->mem_name,
0,
total_dwords,
- entry_width,
+ rss_defs->entry_width,
packed,
rss_defs->type_name, false, 0);
@@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
}
addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
- size = RSS_REG_RSS_RAM_DATA_SIZE;
- for (i = 0; i < total_dwords; i += size, rss_addr++) {
+ while (total_dwords) {
+ num_dwords_to_read = min_t(u32,
+ RSS_REG_RSS_RAM_DATA_SIZE,
+ total_dwords);
qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
addr,
- size,
+ num_dwords_to_read,
false);
+ total_dwords -= num_dwords_to_read;
+ rss_addr++;
}
}
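
The fixed-stride for loop becomes a while loop that clamps the final window with min_t(), so a total_dwords that is not a multiple of RSS_REG_RSS_RAM_DATA_SIZE no longer over-reads the tail. The same shape, with an illustrative window size:

#include <stdint.h>

#define WINDOW_DWORDS	160	/* illustrative, not the real register size */

static uint32_t min_u32(uint32_t a, uint32_t b)	/* like min_t(u32, ...) */
{
	return a < b ? a : b;
}

static void dump_windowed(uint32_t total_dwords)
{
	uint32_t ram_addr = 0;

	while (total_dwords) {
		uint32_t n = min_u32(WINDOW_DWORDS, total_dwords);

		/* write ram_addr to the window register, read n dwords */
		total_dwords -= n;
		ram_addr++;
	}
}
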
@@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 big_ram_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 total_blocks, ram_size, offset = 0, i;
+ u32 block_size, ram_size, offset = 0, reg_val, i;
char mem_name[12] = "???_BIG_RAM";
char type_name[8] = "???_RAM";
struct big_ram_defs *big_ram;
big_ram = &s_big_ram_defs[big_ram_id];
- total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
- ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+ ram_size = big_ram->ram_size[dev_data->chip_id];
+
+ reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+ block_size = reg_val &
+ BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+ : 128;
strncpy(type_name, big_ram->instance_name,
strlen(big_ram->instance_name));
@@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
mem_name,
0,
ram_size,
- BIG_RAM_BLOCK_SIZE_BYTES * 8,
+ block_size * 8,
false, type_name, false, 0);
/* Read and dump Big RAM data */
@@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
return offset + ram_size;
/* Dump Big RAM */
- for (i = 0; i < total_blocks / 2; i++) {
+ for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+ i++) {
u32 addr, len;
qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
- len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+ len = BRB_REG_BIG_RAM_DATA_SIZE;
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
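
Two related big-RAM changes land here: the block size (128 or 256 bytes) is now probed from the chip's 256B-enable register bit instead of being a compile-time constant, and the read loop covers ram_size dwords in register-window chunks, rounding up so a partial final window is still dumped. In miniature:

#include <stdint.h>

#define BIT(n)			(1U << (n))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* 256B-enable bit set -> 256-byte blocks, else 128. ('&' binds
 * tighter than '?:', so the test is (reg_val & BIT(bit_off)).) */
static uint32_t probe_block_size(uint32_t reg_val, uint32_t bit_off)
{
	return reg_val & BIT(bit_off) ? 256 : 128;
}

/* Windows needed to cover the RAM; the last one may be partial. */
static uint32_t num_windows(uint32_t ram_size_dwords, uint32_t window_dwords)
{
	return DIV_ROUND_UP(ram_size_dwords, window_dwords);
}
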
@@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
- MCP_REG_SCRATCH_SIZE,
+ MCP_REG_SCRATCH_SIZE_BB_K2,
false, 0, false, "MCP", false, 0);
/* Dump MCP cpu_reg_file */
@@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
phy_defs->tbus_data_lo_addr;
data_hi_addr = phy_defs->base_addr +
phy_defs->tbus_data_hi_addr;
- bytes_buf = (u8 *)(dump_buf + offset);
if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
phy_defs->phy_name) < 0)
@@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
continue;
}
+ bytes_buf = (u8 *)(dump_buf + offset);
for (tbus_hi_offset = 0;
tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
tbus_hi_offset++) {
@@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 block_id, line_id, offset = 0;
- /* Skip static debug if a debug bus recording is in progress */
- if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+ /* Don't dump static debug if a debug bus recording is in progress */
+ if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
return 0;
if (dump) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG, "Dumping static debug data...\n");
-
/* Disable all blocks debug output */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
struct block_defs *block = s_block_defs[block_id];
- if (block->has_dbg_bus[dev_data->chip_id])
+ if (block->dbg_client_id[dev_data->chip_id] !=
+ MAX_DBG_BUS_CLIENTS)
qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
0);
}
@@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
u32 block_dwords, addr, len;
u8 dbg_client_id;
- if (!block->has_dbg_bus[dev_data->chip_id])
+ if (block->dbg_client_id[dev_data->chip_id] ==
+ MAX_DBG_BUS_CLIENTS)
continue;
- block_desc =
- get_dbg_bus_block_desc(p_hwfn,
- (enum block_id)block_id);
+ block_desc = get_dbg_bus_block_desc(p_hwfn,
+ (enum block_id)block_id);
block_dwords = NUM_DBG_LINES(block_desc) *
STATIC_DEBUG_LINE_DWORDS;
@@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset, dump);
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
if (dump) {
/* Unstall storms */
@@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
+
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
u32 next_reg_offset = 0;
- if (!dump) {
- offset += qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- false,
- rule->rule_id,
- rule,
- entry_id,
- NULL);
- (*num_failing_rules)++;
- break;
- }
-
/* Read current entry of all condition registers */
for (reg_id = 0; reg_id < rule->num_cond_regs;
reg_id++) {
const struct dbg_idle_chk_cond_reg *reg =
- &cond_regs[reg_id];
+ &cond_regs[reg_id];
u32 padded_entry_size, addr;
bool wide_bus;
@@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (reg->num_entries > 1 ||
reg->start_entry > 0) {
padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two(reg->entry_size)
- : 1;
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
addr += (reg->start_entry + entry_id) *
padded_entry_size;
}
@@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
entry_id,
cond_reg_values);
(*num_failing_rules)++;
- break;
}
}
}
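
The rule-entry loop is restructured around the driver's two-pass convention, where the same routine runs once with dump=false to size the buffer and once with dump=true to fill it. The sizing pass now prices a single failure record and multiplies by the entry count, and the dump pass drops the old break so every failing entry is reported, not just the first per rule. The two-pass shape in brief:

#include <stdint.h>

static uint32_t dump_one_failure(uint32_t *buf, int dump)
{
	(void)buf; (void)dump;
	return 16;	/* illustrative record size in dwords */
}

static uint32_t rule_entries_size(int dump, uint32_t num_reg_entries,
				  uint32_t *buf)
{
	uint32_t offset = 0, entry;

	if (!dump)	/* sizing pass: price one record, multiply */
		return num_reg_entries * dump_one_failure(buf, 0);

	for (entry = 0; entry < num_reg_entries; entry++)
		offset += dump_one_failure(buf + offset, 1);	/* no break */
	return offset;
}
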
@@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
dump, "num_rules", num_failing_rules);
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
return offset;
}
@@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
(nvram_offset_bytes +
read_offset) |
(bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_SHIFT),
+ DRV_MB_PARAM_NVM_LEN_OFFSET),
&ret_mcp_resp, &ret_mcp_param,
&ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset)))
@@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
offset += trace_meta_size_dwords;
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 dwords_read, size_param_offset, offset = 0;
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
* buffer size since more entries could be added to the buffer as we are
* emptying it.
*/
+ addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+ len = REG_FIFO_ELEMENT_DWORDS;
for (dwords_read = 0;
fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
- dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
- REG_FIFO_ELEMENT_DWORDS) {
- if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
- (u64)(uintptr_t)(&dump_buf[offset]),
- REG_FIFO_ELEMENT_DWORDS, 0))
- return DBG_STATUS_DMAE_FAILED;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true);
fifo_has_data = qed_rd(p_hwfn, p_ptt,
GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
}
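
FIFO draining switches from a bare qed_dmae_grc2host() call, whose failure aborted the whole dump with DBG_STATUS_DMAE_FAILED, to qed_grc_dump_addr_range() with wide_bus=true: DMAE is still tried first, but a failure now degrades to GRC reads. (That is also why DBG_STATUS_DMAE_FAILED becomes a reserved status in the string table further down; the IGU FIFO and protection-override hunks below make the same conversion.) The bounded drain pattern, stubbed:

#include <stdbool.h>
#include <stdint.h>

#define ELEMENT_DWORDS	4	/* illustrative element size */
#define DEPTH_DWORDS	(128 * ELEMENT_DWORDS)

static int pending = 5;		/* fake number of queued elements */

static bool fifo_valid(void)	/* stand-in for the VALID_DATA read */
{
	return pending > 0;
}

static uint32_t read_element(uint32_t *dst)
{
	dst[0] = dst[1] = dst[2] = dst[3] = 0;	/* would read via DMAE/GRC */
	pending--;
	return ELEMENT_DWORDS;
}

/* Bounded by the FIFO depth: entries can keep arriving while we
 * drain, so the depth caps the loop rather than the live fill level. */
static uint32_t drain_fifo(uint32_t *buf)
{
	uint32_t offset = 0, dwords_read = 0;
	bool has_data = fifo_valid();

	while (has_data && dwords_read < DEPTH_DWORDS) {
		offset += read_element(buf + offset);
		dwords_read += ELEMENT_DWORDS;
		has_data = fifo_valid();
	}
	return offset;
}
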
@@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
dwords_read);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 dwords_read, size_param_offset, offset = 0;
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
* buffer size since more entries could be added to the buffer as we are
* emptying it.
*/
+ addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+ len = IGU_FIFO_ELEMENT_DWORDS;
for (dwords_read = 0;
fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
- dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
- IGU_FIFO_ELEMENT_DWORDS) {
- if (qed_dmae_grc2host(p_hwfn, p_ptt,
- IGU_REG_ERROR_HANDLING_MEMORY,
- (u64)(uintptr_t)(&dump_buf[offset]),
- IGU_FIFO_ELEMENT_DWORDS, 0))
- return DBG_STATUS_DMAE_FAILED;
- fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true);
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
}
@@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
dwords_read);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
bool dump,
u32 *num_dumped_dwords)
{
- u32 size_param_offset, override_window_dwords, offset = 0;
+ u32 size_param_offset, override_window_dwords, offset = 0, addr;
*num_dumped_dwords = 0;
@@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Add override window info to buffer */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt,
- GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
- if (qed_dmae_grc2host(p_hwfn, p_ptt,
- GRC_REG_PROTECTION_OVERRIDE_WINDOW,
- (u64)(uintptr_t)(dump_buf + offset),
- override_window_dwords, 0))
- return DBG_STATUS_DMAE_FAILED;
- offset += override_window_dwords;
+ qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ override_window_dwords,
+ true);
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
next_list_idx_addr = fw_asserts_section_addr +
DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
- last_list_idx = (next_list_idx > 0
- ? next_list_idx
- : asserts->list_num_elements) - 1;
+ last_list_idx = (next_list_idx > 0 ?
+ next_list_idx :
+ asserts->list_num_elements) - 1;
addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
asserts->list_dword_offset +
last_list_idx * asserts->list_element_dword_size;
@@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
}
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
return offset;
}
@@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data {
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
-/********************************* Macros ************************************/
-
-#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-
/***************************** Constant Arrays *******************************/
struct user_dbg_array {
@@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = {
{"phy_pcie", BLOCK_PHY_PCIE},
{"led", BLOCK_LED},
{"avs_wrap", BLOCK_AVS_WRAP},
+ {"pxpreqbus", BLOCK_PXPREQBUS},
{"misc_aeu", BLOCK_MISC_AEU},
{"bar0_map", BLOCK_BAR0_MAP}
};
@@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
- /* DBG_STATUS_DMAE_FAILED */
- "DMAE transaction failed",
+ /* DBG_STATUS_RESERVED2 */
+ "Reserved debug status - shouldn't be returned",
/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
@@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf,
if (*(char_buf + offset++)) {
/* String param */
*param_str_val = char_buf + offset;
+ *param_num_val = 0;
offset += strlen(*param_str_val) + 1;
if (offset & 0x3)
offset += (4 - (offset & 0x3));
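
String parameters are NUL-terminated and padded out to the next dword boundary; the added line zeroes the numeric out-parameter so a string param never leaves a stale number behind in the caller. The complete branch as a standalone helper:

#include <stdint.h>
#include <string.h>

static uint32_t skip_string_param(char *char_buf, uint32_t offset,
				  char **param_str_val,
				  uint32_t *param_num_val)
{
	*param_str_val = char_buf + offset;
	*param_num_val = 0;			/* the line this hunk adds */
	offset += (uint32_t)strlen(*param_str_val) + 1;	/* include the NUL */
	if (offset & 0x3)
		offset += 4 - (offset & 0x3);	/* pad to dword boundary */
	return offset;
}
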
@@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf,
/* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0.
*/
-static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
u32 *dump_buf_end,
u32 num_rules,
bool print_fw_idle_chk,
@@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes,
@@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"FW_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
- dump_buf_end, num_rules,
+ qed_parse_idle_chk_dump_rules(dump_buf,
+ dump_buf_end,
+ num_rules,
true,
results_buf ?
results_buf +
- results_offset : NULL,
- num_errors, num_warnings);
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
results_offset += rules_print_size;
if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"\nLSI_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
- dump_buf_end, num_rules,
+ qed_parse_idle_chk_dump_rules(dump_buf,
+ dump_buf_end,
+ num_rules,
false,
results_buf ?
results_buf +
- results_offset : NULL,
- num_errors, num_warnings);
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
results_offset += rules_print_size;
if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
*/
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -6725,9 +6794,7 @@ free_mem:
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
*element, char
*results_buf,
- u32 *results_offset,
- u32 *parsed_results_bytes)
+ u32 *results_offset)
{
const struct igu_fifo_addr_data *found_addr = NULL;
u8 source, err_type, i, is_cleanup;
@@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
prod_cons,
update_flag ? "update" : "nop",
- en_dis_int_for_sb
- ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
- : "enable",
+ en_dis_int_for_sb ?
+ (en_dis_int_for_sb == 1 ? "disable" : "nop") :
+ "enable",
segment ? "attn" : "regular",
timer_mask);
}
@@ -6969,9 +7035,7 @@ out:
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_elements; i++) {
status = qed_parse_igu_fifo_element(&elements[i],
results_buf,
- &results_offset,
- parsed_results_bytes);
+ &results_offset);
if (status != DBG_STATUS_OK)
return status;
}
@@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
static enum dbg_status
-qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+qed_parse_protection_override_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
{
u32 num_errors, num_warnings;
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
+ return qed_parse_idle_chk_dump(dump_buf,
num_dumped_dwords,
NULL,
results_buf_size,
@@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
- u32 *num_errors, u32 *num_warnings)
+ u32 *num_errors,
+ u32 *num_warnings)
{
u32 parsed_buf_size;
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
+ return qed_parse_idle_chk_dump(dump_buf,
num_dumped_dwords,
results_buf,
&parsed_buf_size,
@@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *results_buf_size)
{
return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
@@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
- num_dumped_dwords,
results_buf, &parsed_buf_size);
}
@@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
}
enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
}
enum dbg_status
@@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_protection_override_dump(dump_buf,
NULL, results_buf_size);
}
@@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_protection_override_dump(dump_buf,
results_buf,
&parsed_buf_size);
}
@@ -7342,10 +7381,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_fw_asserts_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
@@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_fw_asserts_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_fw_asserts_dump(dump_buf,
results_buf, &parsed_buf_size);
}
@@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
/* Go over registers with a non-zero attention status */
for (i = 0; i < num_regs; i++) {
+ struct dbg_attn_bit_mapping *bit_mapping;
struct dbg_attn_reg_result *reg_result;
- struct dbg_attn_bit_mapping *mapping;
u8 num_reg_attn, bit_idx = 0;
reg_result = &results->reg_results[i];
num_reg_attn = GET_FIELD(reg_result->data,
DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
- mapping = &((struct dbg_attn_bit_mapping *)
- block_attn->ptr)[reg_result->block_attn_offset];
+ bit_mapping = &((struct dbg_attn_bit_mapping *)
+ block_attn->ptr)[reg_result->block_attn_offset];
pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
/* Go over attention status bits */
for (j = 0; j < num_reg_attn; j++) {
- u16 attn_idx_val = GET_FIELD(mapping[j].data,
+ u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
- u32 name_offset, sts_addr;
+ u32 attn_name_offset, sts_addr;
/* Check if bit mask should be advanced (due to unused
* bits).
*/
- if (GET_FIELD(mapping[j].data,
+ if (GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
bit_idx += (u8)attn_idx_val;
continue;
@@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Find attention name */
- name_offset = block_attn_name_offsets[attn_idx_val];
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
attn_name = &((const char *)
- pstrings->ptr)[name_offset];
+ pstrings->ptr)[attn_name_offset];
attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
"Interrupt" : "Parity";
masked_str = reg_result->mask_val & BIT(bit_idx) ?
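
The renames in this hunk (mapping to bit_mapping, name_offset to attn_name_offset) are cosmetic, but the GET_FIELD() calls they feed are the reason qed_hsi.h below is wall-to-wall _MASK/_SHIFT defines: each pair drives a token-pasting accessor. A self-contained sketch of the usual qed shape of these macros (an approximation, not copied from the tree):

#include <stdint.h>
#include <stdio.h>

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, flag)				      \
	do {							      \
		(value) &= ~((name##_MASK) << (name##_SHIFT));	      \
		(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
	} while (0)

#define DEMO_FIELD_MASK		0x3	/* 2-bit field ... */
#define DEMO_FIELD_SHIFT	4	/* ... at bits 5:4 */

int main(void)
{
	uint8_t flags = 0;

	SET_FIELD(flags, DEMO_FIELD, 2);
	printf("flags=0x%02x field=%u\n",
	       (unsigned)flags, (unsigned)GET_FIELD(flags, DEMO_FIELD));
	return 0;	/* prints: flags=0x20 field=2 */
}
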
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 58a689fb04db..553a6d17260e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* This function reconfigures the QM pf on the fly.
* For this purpose we:
* 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
* 3. send an sdm_qm_cmd through the rbc interface to stop the QM
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- qed_qm_init_pf(p_hwfn, p_ptt);
+ qed_qm_init_pf(p_hwfn, p_ptt, false);
/* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
}
- /* Protocl Configuration */
+ /* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
/* PF Init sequence */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
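
A DMAE health check is inserted ahead of the PF init phase, which itself depends on DMAE, so a broken engine fails fast with a distinct error instead of partway through init. Only the call site appears in this patch; a plausible shape for such a check (an assumption, not the driver's implementation) is a seeded round trip:

#include <stdint.h>
#include <string.h>

static int dmae_round_trip(uint32_t *src, uint32_t *dst, uint32_t dwords)
{
	memcpy(dst, src, dwords * 4);	/* stub: a real check uses DMAE */
	return 0;
}

static int dmae_sanity_sketch(void)
{
	uint32_t src[8], dst[8], i;

	for (i = 0; i < 8; i++)
		src[i] = 0xd1ae0000u + i;	/* recognizable pattern */
	memset(dst, 0, sizeof(dst));

	if (dmae_round_trip(src, dst, 8))
		return -1;			/* transfer itself failed */
	return memcmp(src, dst, sizeof(src)) ? -1 : 0;
}
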
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* No need for a case for QED_CMDQS_CQS since
* CNQ/CMDQS are the same resource.
*/
- resc_max_val = NUM_OF_CMDQS_CQS;
+ resc_max_val = NUM_OF_GLOBAL_QUEUES;
break;
case QED_RDMA_STATS_QUEUE:
resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index df195c02b711..2dc9b312a795 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
struct fcoe_init_ramrod_params *p_ramrod = NULL;
struct fcoe_init_func_ramrod_data *p_data;
- struct fcoe_conn_context *p_cxt = NULL;
+ struct e4_fcoe_conn_context *p_cxt = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
- TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+ E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
- struct fcoe_task_context *p_task_ctx = NULL;
+ struct e4_fcoe_task_context *p_task_ctx = NULL;
int rc;
u32 i;
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
if (rc)
continue;
- memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
SET_FIELD(p_task_ctx->timer_context.logical_client_0,
TIMERS_CONTEXT_VALIDLC0, 1);
SET_FIELD(p_task_ctx->timer_context.logical_client_1,
TIMERS_CONTEXT_VALIDLC1, 1);
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 3427fe7049b5..de873d770575 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -54,7 +54,7 @@
struct qed_hwfn;
struct qed_ptt;
-/* opcodes for the event ring */
+/* Opcodes for the event ring */
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
@@ -82,487 +82,7 @@ enum common_ramrod_cmd_id {
MAX_COMMON_RAMROD_CMD_ID
};
-/* The core storm context for the Ystorm */
-struct ystorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* The core storm context for the Pstorm */
-struct pstorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* Core Slowpath Connection storm context of Xstorm */
-struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo;
- __le32 spq_base_hi;
- struct regpair consolid_base_addr;
- __le16 spq_cons;
- __le16 consolid_cons;
- __le32 reserved0[55];
-};
-
-struct xstorm_core_conn_ag_ctx {
- u8 reserved0;
- u8 core_state;
- u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
- u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
- u8 byte2;
- __le16 physical_q0;
- __le16 consolid_prod;
- __le16 reserved16;
- __le16 tx_bd_cons;
- __le16 tx_bd_or_spq_prod;
- __le16 word5;
- __le16 conn_dpi;
- u8 byte3;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le32 reg4;
- __le32 reg5;
- __le32 reg6;
- __le16 word7;
- __le16 word8;
- __le16 word9;
- __le16 word10;
- __le32 reg7;
- __le32 reg8;
- __le32 reg9;
- u8 byte7;
- u8 byte8;
- u8 byte9;
- u8 byte10;
- u8 byte11;
- u8 byte12;
- u8 byte13;
- u8 byte14;
- u8 byte15;
- u8 e5_reserved;
- __le16 word11;
- __le32 reg10;
- __le32 reg11;
- __le32 reg12;
- __le32 reg13;
- __le32 reg14;
- __le32 reg15;
- __le32 reg16;
- __le32 reg17;
- __le32 reg18;
- __le32 reg19;
- __le16 word12;
- __le16 word13;
- __le16 word14;
- __le16 word15;
-};
-
-struct tstorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
- u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
- u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le32 reg4;
- __le32 reg5;
- __le32 reg6;
- __le32 reg7;
- __le32 reg8;
- u8 byte2;
- u8 byte3;
- __le16 word0;
- u8 byte4;
- u8 byte5;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le32 reg9;
- __le32 reg10;
-};
-
-struct ustorm_core_conn_ag_ctx {
- u8 reserved;
- u8 byte1;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
- u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2;
- u8 byte3;
- __le16 word0;
- __le16 word1;
- __le32 rx_producers;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le16 word2;
- __le16 word3;
-};
-
-/* The core storm context for the Mstorm */
-struct mstorm_core_conn_st_ctx {
- __le32 reserved[24];
-};
-
-/* The core storm context for the Ustorm */
-struct ustorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* core connection context */
-struct core_conn_context {
- struct ystorm_core_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
- struct pstorm_core_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2];
- struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
- struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct ustorm_core_conn_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
-};
-
+/* How ll2 should deal with a packet upon errors */
enum core_error_handle {
LL2_DROP_PACKET,
LL2_DO_NOTHING,
@@ -570,21 +90,25 @@ enum core_error_handle {
MAX_CORE_ERROR_HANDLE
};
+/* Opcodes for the event ring */
enum core_event_opcode {
CORE_EVENT_TX_QUEUE_START,
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
+ CORE_EVENT_TX_QUEUE_UPDATE,
MAX_CORE_EVENT_OPCODE
};
+/* The L4 pseudo checksum mode for Core */
enum core_l4_pseudo_checksum_mode {
CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+/* Light-L2 per-port statistics */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
struct regpair gsi_invalid_pkt_length;
@@ -592,6 +116,7 @@ struct core_ll2_port_stats {
struct regpair gsi_crcchksm_error;
};
+/* Ethernet TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
@@ -601,6 +126,7 @@ struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_bcast_pkts;
};
+/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
@@ -621,6 +147,7 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
CORE_RAMROD_RX_QUEUE_START,
@@ -628,53 +155,64 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
+ CORE_RAMROD_TX_QUEUE_UPDATE,
MAX_CORE_RAMROD_CMD_ID
};
+/* Core RoCE flavor type */
enum core_roce_flavor_type {
CORE_ROCE,
CORE_RROCE,
MAX_CORE_ROCE_FLAVOR_TYPE
};
+/* Specifies how ll2 should deal with packet errors: packet_too_big and
+ * no_buff.
+ */
struct core_rx_action_on_error {
u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
-#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
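Every bitfield in these structures follows the same convention: a raw byte or
word plus NAME_MASK/NAME_SHIFT macro pairs, accessed through the generic
SET_FIELD/GET_FIELD helpers defined elsewhere in the qed headers. A minimal
sketch of that idiom, reusing the core_rx_action_on_error macros above (the
helper bodies here are a paraphrase for illustration, not the driver's exact
definitions):

/* Generic accessors in the style of the driver's SET_FIELD/GET_FIELD */
#define SET_FIELD(val, name, v)						\
	((val) = (((val) & ~((name##_MASK) << (name##_SHIFT))) |	\
		  (((v) & (name##_MASK)) << (name##_SHIFT))))
#define GET_FIELD(val, name)						\
	(((val) >> (name##_SHIFT)) & (name##_MASK))

/* Example: drop oversized packets (LL2_DROP_PACKET == 0) but take no
 * action (LL2_DO_NOTHING == 1) when no buffer is available; the action
 * values come from the core_error_handle enum above. */
static inline u8 build_rx_error_action(void)
{
	u8 error_type = 0;

	SET_FIELD(error_type, CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, 0);
	SET_FIELD(error_type, CORE_RX_ACTION_ON_ERROR_NO_BUFF, 1);
	return error_type;
}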
+/* Core RX BD for Light L2 */
struct core_rx_bd {
struct regpair addr;
__le16 reserved[4];
};
+/* Core RX CM offload BD for Light L2 */
struct core_rx_bd_with_buff_len {
struct regpair addr;
__le16 buff_length;
__le16 reserved[3];
};
+/* Core RX BD union for Light L2 */
union core_rx_bd_union {
struct core_rx_bd rx_bd;
struct core_rx_bd_with_buff_len rx_bd_with_len;
};
+/* Opaque Data for Light L2 RX CQE */
struct core_rx_cqe_opaque_data {
__le32 data[2];
};
+/* Core RX CQE Type for Light L2 */
enum core_rx_cqe_type {
- CORE_RX_CQE_ILLIGAL_TYPE,
+ CORE_RX_CQE_ILLEGAL_TYPE,
CORE_RX_CQE_TYPE_REGULAR,
CORE_RX_CQE_TYPE_GSI_OFFLOAD,
CORE_RX_CQE_TYPE_SLOW_PATH,
MAX_CORE_RX_CQE_TYPE
};
+/* Core RX fast path CQE for Light L2 */
struct core_rx_fast_path_cqe {
u8 type;
u8 placement_offset;
@@ -687,6 +225,7 @@ struct core_rx_fast_path_cqe {
__le32 reserved1[3];
};
+/* Core RX CM offload CQE */
struct core_rx_gsi_offload_cqe {
u8 type;
u8 data_length_error;
@@ -696,9 +235,11 @@ struct core_rx_gsi_offload_cqe {
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
__le16 qp_id;
- __le32 gid_dst[4];
+ __le32 src_qp;
+ __le32 reserved[3];
};
+/* Core RX slow path CQE for Light L2 */
struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
@@ -707,12 +248,14 @@ struct core_rx_slow_path_cqe {
__le32 reserved1[5];
};
+/* Core RX CQE union for Light L2 */
union core_rx_cqe_union {
struct core_rx_fast_path_cqe rx_cqe_fp;
struct core_rx_gsi_offload_cqe rx_cqe_gsi;
struct core_rx_slow_path_cqe rx_cqe_sp;
};
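All three CQE variants begin with the same u8 type field, so a consumer can
read any member of the union and dispatch on enum core_rx_cqe_type. An
illustrative sketch (the handler bodies are placeholders, not driver code):

static void handle_rx_cqe(union core_rx_cqe_union *cqe)
{
	switch (cqe->rx_cqe_fp.type) {	/* 'type' is first in every variant */
	case CORE_RX_CQE_TYPE_REGULAR:
		/* fast-path packet: length/offset live in rx_cqe_fp */
		break;
	case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		/* RoCE/GSI offload completion: fields in rx_cqe_gsi */
		break;
	case CORE_RX_CQE_TYPE_SLOW_PATH:
		/* slow-path event: ramrod_cmd_id in rx_cqe_sp */
		break;
	default:			/* CORE_RX_CQE_ILLEGAL_TYPE etc. */
		break;
	}
}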
+/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
struct regpair cqe_pbl_addr;
@@ -723,16 +266,18 @@ struct core_rx_start_ramrod_data {
u8 complete_event_flg;
u8 drop_ttl0_flg;
__le16 num_of_pbl_pages;
- u8 inner_vlan_removal_en;
+ u8 inner_vlan_stripping_en;
+ u8 report_outer_vlan;
u8 queue_id;
u8 main_func_queue;
u8 mf_si_bcast_accept_all;
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
- u8 reserved[7];
+ u8 reserved[6];
};
+/* Ramrod data for rx queue stop ramrod */
struct core_rx_stop_ramrod_data {
u8 complete_cqe_flg;
u8 complete_event_flg;
@@ -741,46 +286,51 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
+/* Flags for Core TX BD */
struct core_tx_bd_data {
__le16 as_bitfield;
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
-#define CORE_TX_BD_DATA_START_BD_MASK 0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT 2
-#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_DATA_NBDS_MASK 0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT 8
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
-#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
-#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
-};
-
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
+};
+
+/* Core TX BD for Light L2 */
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
struct core_tx_bd_data bd_data;
__le16 bitfield1;
-#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
-#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK 0x3
-#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
};
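Combining the two bitfields, the first BD of a packet might be filled as in
the sketch below; it assumes the SET_FIELD idiom shown earlier plus the
kernel's cpu_to_le16/cpu_to_le32 and upper/lower_32_bits helpers, and is not
the driver's actual transmit path:

/* Sketch: populate the first (START) BD of a 2-BD packet bound for
 * the network port. */
static void fill_first_tx_bd(struct core_tx_bd *bd, u64 dma_addr, u16 len)
{
	u16 bd_flags = 0, bitfield1 = 0;

	bd->addr.hi = cpu_to_le32(upper_32_bits(dma_addr));
	bd->addr.lo = cpu_to_le32(lower_32_bits(dma_addr));
	bd->nbytes = cpu_to_le16(len);

	SET_FIELD(bd_flags, CORE_TX_BD_DATA_START_BD, 1);	/* first BD */
	SET_FIELD(bd_flags, CORE_TX_BD_DATA_NBDS, 2);		/* 2-BD packet */
	bd->bd_data.as_bitfield = cpu_to_le16(bd_flags);

	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, CORE_TX_DEST_NW); /* to wire */
	bd->bitfield1 = cpu_to_le16(bitfield1);
}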
+/* Light L2 TX Destination */
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
@@ -789,6 +339,7 @@ enum core_tx_dest {
MAX_CORE_TX_DEST
};
+/* Ramrod data for tx queue start ramrod */
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr;
__le16 mtu;
@@ -803,10 +354,20 @@ struct core_tx_start_ramrod_data {
u8 resrved[3];
};
+/* Ramrod data for tx queue stop ramrod */
struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+ u8 update_qm_pq_id_flg;
+ u8 reserved0;
+ __le16 qm_pq_id;
+ __le32 reserved1[1];
+};
+
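The new update ramrod carries only an optional QM physical-queue remap. A
hedged sketch of preparing one (posting the ramrod on the slow path queue is
outside this snippet):

/* Sketch: request that the firmware move a TX queue to another QM PQ. */
static void prep_tx_queue_update(struct core_tx_update_ramrod_data *p,
				 u16 new_pq_id)
{
	memset(p, 0, sizeof(*p));
	p->update_qm_pq_id_flg = 1;		/* remap requested */
	p->qm_pq_id = cpu_to_le16(new_pq_id);	/* target physical queue */
}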
+/* Enum flag for what type of dcb data to update */
enum dcb_dscp_update_mode {
DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
@@ -815,6 +376,487 @@ enum dcb_dscp_update_mode {
MAX_DCB_DSCP_UPDATE_MODE
};
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo;
+ __le32 spq_base_hi;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons;
+ __le16 consolid_cons;
+ __le32 reserved0[55];
+};
+
+struct e4_xstorm_core_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 consolid_prod;
+ __le16 reserved16;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_or_spq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
+};
+
+struct e4_tstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 rx_producers;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct e4_core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Event Ring VF-PF Channel data */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr;
+};
+
+/* Event Ring malicious VF data */
+struct malicious_vf_eqe_data {
+ u8 vf_id;
+ u8 err_id;
+ __le16 reserved[3];
+};
+
+/* Event Ring initial cleanup data */
+struct initial_cleanup_eqe_data {
+ u8 vf_id;
+ u8 reserved[7];
+};
+
+/* Event Data Union */
+union event_ring_data {
+ u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct iscsi_eqe_data iscsi_info;
+ struct iscsi_connect_done_results iscsi_conn_done_info;
+ union rdma_eqe_data rdma_data;
+ struct malicious_vf_eqe_data malicious_vf;
+ struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id;
+ u8 opcode;
+ __le16 reserved0;
+ __le16 echo;
+ u8 fw_return_code;
+ u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
+
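A consumer of the event ring checks the ASYNC flag and then interprets the
data union according to protocol_id and opcode; a sketch under the same
GET_FIELD assumption (the dispatch cases are placeholders, not the driver's
actual handlers):

static void handle_eqe(struct event_ring_entry *eqe)
{
	if (!GET_FIELD(eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
		/* Synchronous ramrod completion: 'echo' pairs the entry
		 * with the slow path element that was posted earlier. */
		return;
	}

	/* Asynchronous event: the data union layout depends on the
	 * source, e.g. malicious_vf carries vf_id/err_id. */
	switch (eqe->opcode) {
	default:
		break;
	}
}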
/* Event Ring Next Page Address */
struct event_ring_next_addr {
struct regpair addr;
@@ -908,12 +994,21 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+/* Port flow control modes */
enum fw_flow_ctrl_mode {
flow_ctrl_pause,
flow_ctrl_pfc,
MAX_FW_FLOW_CTRL_MODE
};
+/* GFT profile type */
+enum gft_profile_type {
+ GFT_PROFILE_TYPE_4_TUPLE,
+ GFT_PROFILE_TYPE_L4_DST_PORT,
+ GFT_PROFILE_TYPE_IP_DST_PORT,
+ MAX_GFT_PROFILE_TYPE
+};
+
/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
@@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct {
};
enum iwarp_ll2_tx_queues {
- IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
IWARP_LL2_ERROR,
MAX_IWARP_LL2_TX_QUEUES
};
-/* Mstorm non-triggering VF zone */
+/* Malicious VF error ID */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
@@ -951,9 +1046,11 @@ enum malicious_vf_error_id {
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
ETH_ANTI_SPOOFING_ERR,
+ ETH_PACKET_SIZE_TOO_LARGE,
MAX_MALICIOUS_VF_ERROR_ID
};
+/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
@@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone {
/* Mstorm VF zone */
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* vlan header including TPID and TCI fields */
+struct vlan_header {
+ __le16 tpid;
+ __le16 tci;
+};
+/* Outer tag configuration */
+struct outer_tag_config_struct {
+ u8 enable_stag_pri_change;
+ u8 pri_map_valid;
+ u8 reserved[2];
+ struct vlan_header outer_tag;
+ u8 inner_to_outer_pri_map[8];
};
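This block, now embedded in pf_start_ramrod_data below, bundles the outer
S-tag header with an 8-entry inner-to-outer priority map. An illustrative
fill; the 802.1ad TPID 0x88a8 is an example choice, not mandated by the HSI:

/* Sketch: enable S-tag priority rewrite with an identity priority map. */
static void set_outer_tag(struct outer_tag_config_struct *cfg, u16 vlan_id)
{
	int pri;

	cfg->enable_stag_pri_change = 1;
	cfg->pri_map_valid = 1;
	cfg->outer_tag.tpid = cpu_to_le16(0x88a8);	/* 802.1ad S-tag */
	cfg->outer_tag.tci = cpu_to_le16(vlan_id & 0xfff);
	for (pri = 0; pri < 8; pri++)
		cfg->inner_to_outer_pri_map[pri] = pri;
}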
/* personality per PF */
@@ -974,7 +1085,7 @@ enum personality_type {
PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
- PERSONALITY_RESERVED4,
+ PERSONALITY_RESERVED,
MAX_PERSONALITY_TYPE
};
@@ -997,7 +1108,6 @@ struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
struct regpair consolid_q_pbl_addr;
struct pf_start_tunnel_config tunnel_config;
- __le32 reserved;
__le16 event_ring_sb_id;
u8 base_vf_id;
u8 num_vfs;
@@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data {
u8 mf_mode;
u8 integ_phase;
u8 allow_npar_tx_switching;
- u8 inner_to_outer_pri_map[8];
- u8 pri_map_valid;
- __le32 outer_tag;
+ u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
+ struct outer_tag_config_struct outer_tag_config;
};
+/* Per-protocol DCB data, carried in the update ramrods */
struct protocol_dcb_data {
u8 dcb_enable_flag;
- u8 reserved_a;
+ u8 dscp_enable_flag;
u8 dcb_priority;
u8 dcb_tc;
- u8 reserved_b;
+ u8 dscp_val;
u8 reserved0;
};
+/* Update tunnel configuration */
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
@@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config {
__le16 reserved;
};
+/* Data for pf update ramrod */
struct pf_update_ramrod_data {
- u8 pf_id;
u8 update_eth_dcb_data_mode;
u8 update_fcoe_dcb_data_mode;
u8 update_iscsi_dcb_data_mode;
@@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data {
u8 update_rroce_dcb_data_mode;
u8 update_iwarp_dcb_data_mode;
u8 update_mf_vlan_flag;
+ u8 update_enable_stag_pri_change;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
@@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data {
struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
- __le16 reserved;
+ u8 enable_stag_pri_change;
+ u8 reserved;
struct pf_update_tunnel_config tunnel_config;
};
@@ -1079,11 +1192,13 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
+/* RDMA TX Stats */
struct rdma_sent_stats {
struct regpair sent_bytes;
struct regpair sent_pkts;
};
+/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct rdma_sent_stats rdma_stats;
@@ -1103,11 +1218,34 @@ struct ramrod_header {
__le16 echo;
};
+/* RDMA RX Stats */
struct rdma_rcv_stats {
struct regpair rcv_bytes;
struct regpair rcv_pkts;
};
+/* Data for update QCN/DCQCN RL ramrod */
+struct rl_update_ramrod_data {
+ u8 qcn_update_param_flg;
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg;
+ u8 rl_start_flg;
+ u8 rl_stop_flg;
+ u8 rl_id_first;
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg;
+ __le32 rl_bc_rate;
+ __le16 rl_max_rate;
+ __le16 rl_r_ai;
+ __le16 rl_r_hai;
+ __le16 dcqcn_g;
+ __le32 dcqcn_k_us;
+ __le32 dcqcn_timeuot_us;
+ __le32 qcn_timeuot_us;
+ __le32 reserved[2];
+};
+
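A minimal request that initializes and starts a single rate limiter in DCQCN
mode could look like the sketch below; the rate, AI/HAI, g, K and timeout
fields use firmware-defined units and are left as placeholders here:

/* Sketch: (re)initialize and start rate limiter 'rl_id' for DCQCN.
 * All numeric parameters below are placeholders; real values come
 * from QoS/congestion-control configuration. */
static void prep_rl_update(struct rl_update_ramrod_data *rl, u8 rl_id)
{
	memset(rl, 0, sizeof(*rl));
	rl->dcqcn_update_param_flg = 1;
	rl->rl_init_flg = 1;
	rl->rl_start_flg = 1;
	rl->rl_id_first = rl_id;
	rl->rl_id_last = rl_id;			/* single-RL range */
	rl->rl_dc_qcn_flg = 1;			/* DCQCN rather than QCN */
	rl->rl_bc_rate = cpu_to_le32(0);	/* byte-counter rate */
	rl->rl_max_rate = cpu_to_le16(0);	/* rate cap */
}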
+/* Slowpath Element (SPQE) */
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat {
struct regpair roce_irregular_pkt;
struct regpair iwarp_irregular_pkt;
struct regpair eth_irregular_pkt;
- struct regpair reserved1;
+ struct regpair toe_irregular_pkt;
struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
+ struct regpair eth_gft_drop_pkt;
};
/* Tstorm VF zone */
@@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
+/* VF zone size mode */
enum vf_zone_size_mode {
VF_ZONE_SIZE_MODE_DEFAULT,
VF_ZONE_SIZE_MODE_DOUBLE,
@@ -1204,6 +1344,7 @@ enum vf_zone_size_mode {
MAX_VF_ZONE_SIZE_MODE
};
+/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1212,12 +1353,6 @@ struct atten_status_block {
__le32 reserved1;
};
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
-};
-
/* DMAE command */
struct dmae_cmd {
__le32 opcode;
@@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
-struct mstorm_core_conn_ag_ctx {
+struct e4_mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct ystorm_core_conn_ag_ctx {
+struct e4_ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask {
};
/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
};
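Composing the renamed E4 PQ-map dword uses the same SET_FIELD idiom (a
sketch; writing the result into QM map memory happens elsewhere):

/* Sketch: build one QM PQ map dword for physical queue programming. */
static u32 build_pq_map_e4(u8 rl_id, u16 vp_pq_id, u8 voq, u8 wrr_grp,
			   bool rl_valid)
{
	u32 reg = 0;

	SET_FIELD(reg, QM_RF_PQ_MAP_E4_PQ_VALID, 1);
	SET_FIELD(reg, QM_RF_PQ_MAP_E4_RL_ID, rl_id);
	SET_FIELD(reg, QM_RF_PQ_MAP_E4_VP_PQ_ID, vp_pq_id);
	SET_FIELD(reg, QM_RF_PQ_MAP_E4_VOQ, voq);
	SET_FIELD(reg, QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP, wrr_grp);
	SET_FIELD(reg, QM_RF_PQ_MAP_E4_RL_VALID, rl_valid ? 1 : 0);
	return reg;
}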
/* Completion params for aggregated interrupt completion */
@@ -1643,8 +1778,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PTLD = 0x590000,
- GRCBASE_YPLD = 0x5b0000,
+ GRCBASE_PTLD = 0x5a0000,
+ GRCBASE_YPLD = 0x5c0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -1675,6 +1810,7 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_PXPREQBUS = 0x56000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1766,6 +1902,7 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1841,7 +1978,7 @@ struct dbg_attn_block_result {
struct dbg_attn_reg_result reg_results[15];
};
-/* mode header */
+/* Mode header */
struct dbg_mode_hdr {
__le16 data;
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
@@ -1863,80 +2000,83 @@ struct dbg_attn_reg {
__le32 mask_address;
};
-/* attention types */
+/* Attention types */
enum dbg_attn_type {
ATTN_TYPE_INTERRUPT,
ATTN_TYPE_PARITY,
MAX_DBG_ATTN_TYPE
};
+/* Debug Bus block data */
struct dbg_bus_block {
u8 num_of_lines;
u8 has_latency_events;
__le16 lines_offset;
};
+/* Debug Bus block user data */
struct dbg_bus_block_user_data {
u8 num_of_lines;
u8 has_latency_events;
__le16 names_offset;
};
+/* Block Debug line data */
struct dbg_bus_line {
u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK 0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT 4
-#define DBG_BUS_LINE_RESERVED_MASK 0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT 5
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
u8 group_sizes;
};
-/* condition header for registers dump */
+/* Condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
u8 block_id; /* block ID */
u8 data_size; /* size in dwords of the data following this header */
};
-/* memory data for registers dump */
+/* Memory data for registers dump */
struct dbg_dump_mem {
__le32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
__le32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
-#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
-/* register data for registers dump */
+/* Register data for registers dump */
struct dbg_dump_reg {
__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
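When parsing a GRC dump, each dbg_dump_reg dword unpacks back into a register
address (in dwords), a wide-bus flag and a length; a decoding sketch using
GET_FIELD as before:

/* Sketch: unpack one register-dump descriptor into byte address,
 * length in dwords and the wide-bus indication. */
static void decode_dump_reg(u32 data, u32 *grc_addr, u32 *len, bool *wide)
{
	*grc_addr = GET_FIELD(data, DBG_DUMP_REG_ADDRESS) * 4; /* dwords -> bytes */
	*len = GET_FIELD(data, DBG_DUMP_REG_LENGTH);	       /* in dwords */
	*wide = GET_FIELD(data, DBG_DUMP_REG_WIDE_BUS);
}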
-/* split header for registers dump */
+/* Split header for registers dump */
struct dbg_dump_split_hdr {
__le32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
};
-/* condition header for idle check */
+/* Condition header for idle check */
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
__le16 data_size; /* size in dwords of the data following this header */
@@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr {
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
__le16 num_entries;
u8 entry_size;
u8 start_entry;
@@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg {
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
__le16 size; /* register size in dwords */
struct dbg_mode_hdr mode; /* Mode header */
};
@@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule {
/* Idle Check rule parsing data */
struct dbg_idle_chk_rule_parsing_data {
__le32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
};
-/* idle check severity types */
+/* Idle check severity types */
enum dbg_idle_chk_severity_types {
/* idle check failure should cause an error */
IDLE_CHK_SEVERITY_ERROR,
@@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types {
/* Debug Bus block data */
struct dbg_bus_block_data {
__le16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
u8 line_num;
u8 hw_id;
};
@@ -2072,6 +2212,7 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
+/* Debug Bus constraint operation types */
enum dbg_bus_constraint_ops {
DBG_BUS_CONSTRAINT_OP_EQ,
DBG_BUS_CONSTRAINT_OP_NE,
@@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops {
MAX_DBG_BUS_CONSTRAINT_OPS
};
+/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
};
/* Debug Bus memory address */
@@ -2165,6 +2307,7 @@ struct dbg_bus_data {
struct dbg_bus_storm_data storms[6];
};
+/* Debug bus filter types */
enum dbg_bus_filter_types {
DBG_BUS_FILTER_TYPE_OFF,
DBG_BUS_FILTER_TYPE_PRE,
@@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
+/* Debug bus other engine mode */
enum dbg_bus_other_engine_modes {
DBG_BUS_OTHER_ENGINE_MODE_NONE,
DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes {
MAX_DBG_BUS_OTHER_ENGINE_MODES
};
+/* Debug bus post-trigger recording types */
enum dbg_bus_post_trigger_types {
DBG_BUS_POST_TRIGGER_RECORD,
DBG_BUS_POST_TRIGGER_DROP,
MAX_DBG_BUS_POST_TRIGGER_TYPES
};
+/* Debug bus pre-trigger recording types */
enum dbg_bus_pre_trigger_types {
DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
@@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types {
MAX_DBG_BUS_PRE_TRIGGER_TYPES
};
+/* Debug bus SEMI frame modes */
enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
- 0,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
- 3,
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
MAX_DBG_BUS_SEMI_FRAME_MODES
};
@@ -2220,6 +2365,7 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
+/* Debug Bus Storm modes */
enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_PRINTF,
DBG_BUS_STORM_MODE_PRAM_ADDR,
@@ -2352,7 +2498,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_RESERVED2,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2396,7 +2542,8 @@ struct dbg_tools_data {
u8 chip_id;
u8 platform_id;
u8 initialized;
- u8 reserved;
+ u8 use_dmae;
+ __le32 num_regs_read;
};
/********************************/
@@ -2406,6 +2553,7 @@ struct dbg_tools_data {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
+/* BRB RAM init requirements */
struct init_brb_ram_req {
__le32 guranteed_per_tc;
__le32 headroom_per_tc;
@@ -2414,17 +2562,20 @@ struct init_brb_ram_req {
u8 num_active_tcs[MAX_NUM_PORTS];
};
+/* ETS per-TC init requirements */
struct init_ets_tc_req {
u8 use_sp;
u8 use_wfq;
__le16 weight;
};
+/* ETS init requirements */
struct init_ets_req {
__le32 mtu;
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
+/* NIG LB RL init requirements */
struct init_nig_lb_rl_req {
__le16 lb_mac_rate;
__le16 lb_rate;
@@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req {
__le16 tc_rate[NUM_OF_PHYS_TCS];
};
+/* NIG TC mapping for each priority */
struct init_nig_pri_tc_map_entry {
u8 tc_id;
u8 valid;
};
+/* NIG priority to TC map init requirements */
struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
+/* QM per-port init parameters */
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -2563,7 +2717,7 @@ struct bin_buffer_hdr {
__le32 length;
};
-/* binary init buffer types */
+/* Binary init buffer types */
enum bin_init_buffer_type {
BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
@@ -2793,6 +2947,7 @@ struct iro {
};
/***************************** Public Functions *******************************/
+
/**
* @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
@@ -2802,6 +2957,18 @@ struct iro {
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_read_regs - Reads registers into a buffer (using GRC).
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for reading the registers.
+ * @param buf - Destination buffer.
+ * @param addr - Source GRC address in dwords.
+ * @param len - Number of registers to read.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
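
A minimal caller sketch, not part of the patch: only the qed_read_regs() prototype above is from the source; the buffer length and GRC address here are placeholders.

	/* Dump 16 consecutive registers into a local buffer. The GRC
	 * address is given in dword resolution, per the comment above;
	 * example_grc_addr_dwords is a placeholder for a real block address.
	 */
	u32 buf[16];

	qed_read_regs(p_hwfn, p_ptt, buf, example_grc_addr_dwords, 16);
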
+/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
*
@@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
#define MAX_NAME_LEN 16
/***************************** Public Functions *******************************/
+
/**
* @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
* debug arrays.
@@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *num_warnings);
/**
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
+ * meta data.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ *
+ * @param data - pointer to MCP Trace meta data
+ * @param size - size of MCP Trace meta data in dwords
+ */
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
+
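
A sketch of the intended call, assuming the meta data buffer was loaded externally (e.g. handed in from user space); the helper name is illustrative:

	static void example_set_mcp_meta(u32 *meta_buf, u32 size_dwords)
	{
		/* Make externally loaded meta data available so an MCP
		 * Trace dump taken without NVRAM access can still be parsed.
		 */
		qed_dbg_mcp_trace_set_meta_data(meta_buf, size_dwords);
	}
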
+/**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
* for MCP Trace results (in bytes).
*
@@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = {
0x00000000, /* bar0_map, bb, 0 lines */
0x00000000, /* bar0_map, k2, 0 lines */
0x00000000,
+ 0x00000000, /* bar0_map, bb, 0 lines */
+ 0x00000000, /* bar0_map, k2, 0 lines */
+ 0x00000000,
};
/* Win 2 */
@@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = {
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = {
*
* @return The required host memory size in 4KB units.
*/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
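
A worked sketch of the new signature; the connection/task/queue counts are illustrative, not taken from any real resource allocation:

	static u32 example_qm_mem_size(void)
	{
		u32 num_pf_cids = 1024, num_vf_cids = 256, num_tids = 512;
		u16 num_pf_pqs = 16, num_vf_pqs = 8;

		/* Result is in 4KB units; note pf_id is no longer passed. */
		return qed_qm_pf_mem_size(num_pf_cids, num_vf_cids, num_tids,
					  num_pf_pqs, num_vf_pqs);
	}
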
@@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params {
u8 port_id;
u8 pf_id;
u8 max_phys_tcs_per_port;
- bool is_first_pf;
+ bool is_pf_loading;
u32 num_pf_cids;
u32 num_vf_cids;
u32 num_tids;
@@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params {
u8 num_vports;
u16 pf_wfq;
u32 pf_rl;
+ u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
};
@@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
*
* @return 0 on success, -1 on error.
*/
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+ struct qed_ptt *p_ptt,
+ u8 vport_id, u32 vport_rl, u32 link_speed);
+
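
A sketch of the extended call; the rate values are illustrative, with vport_rl in Mb/sec and link_speed in Mbps as documented above:

	static int example_limit_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 vport_id)
	{
		/* Cap the vport at 5 Gb/s on a 25 Gb/s link. */
		return qed_init_vport_rl(p_hwfn, p_ptt, vport_id, 5000, 25000);
	}
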
/**
 * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
*
@@ -3759,7 +3944,8 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
* @param start_pq - first PQ ID to stop
* @param num_pqs - Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occured while waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ * QM command done.
*/
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - vxlan destination udp port.
*/
@@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param vxlan_enable - vxlan enable flag.
*/
@@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
 * @param eth_gre_enable - eth GRE enable flag.
 * @param ip_gre_enable - IP GRE enable flag.
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - geneve destination udp port.
*/
@@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
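
A sketch combining the tunnel helpers above; the UDP ports are the IANA defaults for VXLAN and GENEVE, not values mandated by this API:

	static void example_enable_tunnels(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt)
	{
		qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
		qed_set_vxlan_enable(p_hwfn, p_ptt, true);
		qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);
		qed_set_geneve_enable(p_hwfn, p_ptt, true, true);
	}
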
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 pf_id);
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 pf_id, bool tcp, bool udp,
- bool ipv4, bool ipv6);
-
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
+
+/**
+ * @brief qed_gft_disable - Disable GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * @brief qed_gft_config - Enable and configure HW for GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+ * @param tcp - profile TCP packets.
+ * @param udp - profile UDP packets.
+ * @param ipv4 - profile IPv4 packets.
+ * @param ipv6 - profile IPv6 packets.
+ * @param profile_type - selects which packet fields to match. Use enum
+ * gft_profile_type.
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type);
+
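
A sketch of enabling GFT for TCP/IPv4 4-tuple matching; it assumes GFT_PROFILE_TYPE_4_TUPLE is among the values of enum gft_profile_type and that rel_pf_id identifies this PF, as elsewhere in the driver:

	static void example_enable_gft(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
	{
		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			       true /* tcp */, false /* udp */,
			       true /* ipv4 */, false /* ipv6 */,
			       GFT_PROFILE_TYPE_4_TUPLE);
	}
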
+/**
+ * @brief qed_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_calc_session_ctx_validation - Calculate validation byte for
+ * session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid);
+
+/**
+ * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
+ * context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * @brief qed_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/**
+ * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
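
One plausible ordering of the context helpers above: compute the validation byte for the CID first, then clear the rest of the context, which preserves the bytes just written (ctx_type here is a placeholder for the owning protocol's CDU context type):

	static void example_init_session_ctx(void *p_ctx, u16 ctx_size,
					     u8 ctx_type, u32 cid)
	{
		qed_calc_session_ctx_validation(p_ctx, ctx_size, ctx_type, cid);
		qed_memset_session_ctx(p_ctx, ctx_size, ctx_type);
	}
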
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+
+/* Tstorm ll2 port statistics */
#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
(IRO[2].base + ((port_id) * IRO[2].m1))
#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
- (IRO[4].base + (pf_id) * IRO[4].m1)
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
(IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
(IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+
+/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+
+/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+
+/* Ustorm LightL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+
+/* Pstorm LightL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17]. size)
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+ (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
(IRO[24].base + ((pf_id) * IRO[24].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[26].base + ((pf_id) * IRO[26].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
- (IRO[27].base + ((ethtype) * IRO[27].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+ (IRO[27].base + ((eth_type_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
(IRO[29].base + ((pf_id) * IRO[29].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
(IRO[30].base + ((queue_id) * IRO[30].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[31].base + ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[32].base + ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+ (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+ (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+
+/* Tstorm iSCSI RX stats */
#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+
+/* Mstorm iSCSI RX stats */
#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+
+/* Ustorm iSCSI RX stats */
#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+
+/* Xstorm iSCSI TX stats */
#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+
+/* Ystorm iSCSI TX stats */
#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+
+/* Pstorm iSCSI TX stats */
#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+
+/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[43].base + ((pf_id) * IRO[43].m1))
+ (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
+
+/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
-static const struct iro iro_arr[49] = {
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
+
+static const struct iro iro_arr[51] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb0, 0x80, 0x0, 0x0, 0x80},
- {0x6518, 0x20, 0x0, 0x0, 0x20},
+ {0x4cb8, 0x88, 0x0, 0x0, 0x88},
+ {0x6530, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x4},
{0x84, 0x8, 0x0, 0x0, 0x2},
+ {0x4c48, 0x0, 0x0, 0x0, 0x78},
+ {0x3e18, 0x0, 0x0, 0x0, 0x78},
+ {0x2b58, 0x0, 0x0, 0x0, 0x78},
{0x4c40, 0x0, 0x0, 0x0, 0x78},
- {0x3df0, 0x0, 0x0, 0x0, 0x78},
- {0x29b0, 0x0, 0x0, 0x0, 0x78},
- {0x4c38, 0x0, 0x0, 0x0, 0x78},
- {0x4990, 0x0, 0x0, 0x0, 0x78},
- {0x7f48, 0x0, 0x0, 0x0, 0x78},
+ {0x4998, 0x0, 0x0, 0x0, 0x78},
+ {0x7f50, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x61f8, 0x10, 0x0, 0x0, 0x10},
- {0xbd20, 0x30, 0x0, 0x0, 0x30},
- {0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x4b60, 0x80, 0x0, 0x0, 0x40},
+ {0x6210, 0x10, 0x0, 0x0, 0x10},
+ {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x96c0, 0x30, 0x0, 0x0, 0x30},
+ {0x4b68, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0x53a0, 0x80, 0x4, 0x0, 0x4},
- {0xc7c8, 0x0, 0x0, 0x0, 0x4},
- {0x4ba0, 0x80, 0x0, 0x0, 0x20},
- {0x8150, 0x40, 0x0, 0x0, 0x30},
- {0xec70, 0x60, 0x0, 0x0, 0x60},
- {0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xf1b0, 0x78, 0x0, 0x0, 0x78},
+ {0x53a8, 0x80, 0x4, 0x0, 0x4},
+ {0xc7d0, 0x0, 0x0, 0x0, 0x4},
+ {0x4ba8, 0x80, 0x0, 0x0, 0x20},
+ {0x8158, 0x40, 0x0, 0x0, 0x30},
+ {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x2cf0, 0x80, 0x0, 0x0, 0x38},
+ {0xf2b8, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xaef8, 0x0, 0x0, 0x0, 0xf0},
- {0xafe8, 0x8, 0x0, 0x0, 0x8},
+ {0xaf20, 0x0, 0x0, 0x0, 0xf0},
+ {0xb010, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
{0x24f8, 0x8, 0x0, 0x0, 0x8},
{0x0, 0x8, 0x0, 0x0, 0x8},
- {0x200, 0x10, 0x8, 0x0, 0x8},
- {0xb78, 0x10, 0x8, 0x0, 0x2},
- {0xd9a8, 0x38, 0x0, 0x0, 0x24},
- {0x12988, 0x10, 0x0, 0x0, 0x8},
- {0x11fa0, 0x38, 0x0, 0x0, 0x18},
- {0xa580, 0x38, 0x0, 0x0, 0x10},
- {0x86f8, 0x30, 0x0, 0x0, 0x18},
- {0x101f8, 0x10, 0x0, 0x0, 0x10},
- {0xde28, 0x48, 0x0, 0x0, 0x38},
- {0x10660, 0x20, 0x0, 0x0, 0x20},
- {0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x5020, 0x10, 0x0, 0x0, 0x10},
- {0xc9b0, 0x30, 0x0, 0x0, 0x10},
- {0xeec0, 0x10, 0x0, 0x0, 0x10},
+ {0x400, 0x18, 0x8, 0x0, 0x8},
+ {0xb78, 0x18, 0x8, 0x0, 0x2},
+ {0xd898, 0x50, 0x0, 0x0, 0x3c},
+ {0x12908, 0x18, 0x0, 0x0, 0x10},
+ {0x11aa8, 0x40, 0x0, 0x0, 0x18},
+ {0xa588, 0x50, 0x0, 0x0, 0x20},
+ {0x8700, 0x40, 0x0, 0x0, 0x28},
+ {0x10300, 0x18, 0x0, 0x0, 0x10},
+ {0xde48, 0x48, 0x0, 0x0, 0x38},
+ {0x10768, 0x20, 0x0, 0x0, 0x20},
+ {0x2d28, 0x80, 0x0, 0x0, 0x10},
+ {0x5048, 0x10, 0x0, 0x0, 0x10},
+ {0xc9b8, 0x30, 0x0, 0x0, 0x10},
+ {0xeee0, 0x10, 0x0, 0x0, 0x10},
+ {0xa3a0, 0x10, 0x0, 0x0, 0x10},
+ {0x13108, 0x8, 0x0, 0x0, 0x8},
};
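
A worked example of how the *_OFFSET/_SIZE macros consume this array, assuming IRO resolves to this table at runtime as the driver arranges, and using the updated IRO[1] entry {0x4cb8, 0x88, 0x0, 0x0, 0x88}:

	/* TSTORM_PORT_STAT_OFFSET(port_id) = IRO[1].base + port_id * IRO[1].m1,
	 * so for port 2: 0x4cb8 + 2 * 0x88 = 0x4dc8, and the record spans
	 * TSTORM_PORT_STAT_SIZE = IRO[1].size = 0x88 bytes of storm RAM.
	 */
	u32 off = TSTORM_PORT_STAT_OFFSET(2);	/* 0x4dc8 */
	u32 len = TSTORM_PORT_STAT_SIZE;	/* 0x88 */
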
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30855
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30905
-#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31675
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32699
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33211
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
-#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
-
-#define RUNTIME_ARRAY_SIZE 34309
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6527
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 35389
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 35439
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 36209
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 37233
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 37745
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 38257
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
+
+#define RUNTIME_ARRAY_SIZE 43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB 0
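
The QM/NIG/CDU/PBF *_RT_OFFSET constants above index a host-side runtime-init array of RUNTIME_ARRAY_SIZE u32 entries that the driver stages before the values are pushed to the chip; for array registers, the matching *_RT_SIZE gives the number of consecutive entries. A minimal sketch of that staging pattern follows, assuming a simplified container and hypothetical helper names (the real driver keeps an equivalent per-function runtime array with similar valid-bit bookkeeping):

/* Sketch only: stage runtime-init values by RT offset before HW init. */
#include <linux/types.h>

struct rt_data_sketch {
	u32 init_val[RUNTIME_ARRAY_SIZE];	/* staged register values */
	bool b_valid[RUNTIME_ARRAY_SIZE];	/* which entries were set */
};

static void rt_store_sketch(struct rt_data_sketch *rt, u32 rt_offset, u32 val)
{
	rt->init_val[rt_offset] = val;
	rt->b_valid[rt_offset] = true;
}

static void rt_store_arr_sketch(struct rt_data_sketch *rt, u32 rt_offset,
				const u32 *vals, size_t num)
{
	size_t i;

	for (i = 0; i < num; i++)
		rt_store_sketch(rt, rt_offset + i, vals[i]);
}

/* Usage: a scalar register, then an array register with its RT_SIZE. */
static void rt_store_examples(struct rt_data_sketch *rt, const u32 *incvals)
{
	rt_store_sketch(rt, QM_REG_RLGLBLENABLE_RT_OFFSET, 1);
	rt_store_arr_sketch(rt, QM_REG_RLGLBLINCVAL_RT_OFFSET, incvals,
			    QM_REG_RLGLBLINCVAL_RT_SIZE);
}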
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 ereserved;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset;
u8 byte3;
__le16 word0;
@@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx {
__le32 reg3;
};
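
Each flags byte in these aggregation-context structs packs several sub-fields, described by the _MASK/_SHIFT pairs above. A short sketch of the token-pasting accessors that consume them, assuming the field names defined in this header (qed's common HSI headers provide GET_FIELD/SET_FIELD macros along these lines; the example function is illustrative only):

/* Sketch: generic sub-field access over the _MASK/_SHIFT naming scheme. */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

#define SET_FIELD(value, name, flag)					  \
	do {								  \
		(value) &= ~((name##_MASK) << (name##_SHIFT));		  \
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
	} while (0)

/* Example: enable the CF0 event in flags7, then read the 2-bit CF0 state. */
static inline u8 eth_ag_ctx_cf0_sketch(struct e4_xstorm_eth_conn_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags7, E4_XSTORM_ETH_CONN_AG_CTX_CF0EN, 1);
	return GET_FIELD(ctx->flags2, E4_XSTORM_ETH_CONN_AG_CTX_CF0);
}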
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx {
__le32 reg10;
};
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx {
};
/* eth connection context */
-struct eth_conn_context {
+struct e4_eth_conn_context {
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
+/* Eth error codes */
enum eth_error_code {
ETH_OK = 0x00,
ETH_FILTERS_MAC_ADD_FAIL_FULL,
@@ -4972,6 +5472,7 @@ enum eth_error_code {
MAX_ETH_ERROR_CODE
};
+/* Opcodes for the event ring */
enum eth_event_opcode {
ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START,
@@ -4983,13 +5484,14 @@ enum eth_event_opcode {
ETH_EVENT_RX_QUEUE_UPDATE,
ETH_EVENT_RX_QUEUE_STOP,
ETH_EVENT_FILTERS_UPDATE,
- ETH_EVENT_RESERVED,
- ETH_EVENT_RESERVED2,
- ETH_EVENT_RESERVED3,
+ ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+ ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+ ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
ETH_EVENT_RX_ADD_UDP_FILTER,
ETH_EVENT_RX_DELETE_UDP_FILTER,
- ETH_EVENT_RESERVED4,
- ETH_EVENT_RESERVED5,
+ ETH_EVENT_RX_CREATE_GFT_ACTION,
+ ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ ETH_EVENT_TX_QUEUE_UPDATE,
MAX_ETH_EVENT_OPCODE
};
@@ -5039,6 +5541,7 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
+/* Eth IPv4 Fragment Type */
enum eth_ipv4_frag_type {
ETH_IPV4_NOT_FRAG,
ETH_IPV4_FIRST_FRAG,
@@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE
};
+/* Eth IP type (IPv4/IPv6) */
enum eth_ip_type {
ETH_IPV4,
ETH_IPV6,
MAX_ETH_IP_TYPE
};
+/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_DELETE_UDP_FILTER,
ETH_RAMROD_RX_CREATE_GFT_ACTION,
ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_TX_QUEUE_UPDATE,
MAX_ETH_RAMROD_CMD_ID
};
-/* return code from eth sp ramrods */
+/* Return code from eth sp ramrods */
struct eth_return_code {
u8 value;
#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
@@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode {
__le16 reserved2[3];
};
+/* GFT filter update action type */
enum gft_filter_update_action {
GFT_ADD_FILTER,
GFT_DELETE_FILTER,
MAX_GFT_FILTER_UPDATE_ACTION
};
-enum gft_logic_filter_type {
- GFT_FILTER_TYPE,
- RFS_FILTER_TYPE,
- MAX_GFT_LOGIC_FILTER_TYPE
-};
-
+/* Ramrod data for rx add openflow filter */
struct rx_add_openflow_filter_data {
__le16 action_icid;
u8 priority;
@@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port;
};
+/* Ramrod data for rx create gft action */
struct rx_create_gft_action_data {
u8 vport_id;
u8 reserved[7];
};
+/* Ramrod data for rx create openflow action */
struct rx_create_openflow_action_data {
u8 vport_id;
u8 reserved[7];
@@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data {
struct regpair reserved2;
};
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {
__le16 rx_queue_id;
u8 complete_cqe_flg;
@@ -5324,14 +5828,22 @@ struct rx_udp_filter_data {
__le32 tenant_id;
};
+/* Add or delete a GFT filter - the filter is the packet header of the
+ * packet type that should be passed through a certain FW flow.
+ */
struct rx_update_gft_filter_data {
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length;
- __le16 rx_qid_or_action_icid;
- u8 vport_id;
- u8 filter_type;
+ __le16 action_icid;
+ __le16 rx_qid;
+ __le16 flow_id;
+ __le16 vport_id;
+ u8 action_icid_valid;
+ u8 rx_qid_valid;
+ u8 flow_id_valid;
u8 filter_action;
u8 assert_on_error;
+ u8 reserved;
};
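
With the old rx_qid_or_action_icid field and gft_logic_filter_type gone, the target of a GFT filter is now expressed through explicit per-field valid flags. An illustrative fill of the ramrod data for an add request that steers to a single RX queue, assuming standard kernel endianness helpers; the SPQ submission plumbing is omitted and the function name is hypothetical:

/* Sketch only: populate the reworked GFT update data for GFT_ADD_FILTER. */
static void gft_add_filter_fill_sketch(struct rx_update_gft_filter_data *p,
				       dma_addr_t hdr_phys, u16 hdr_len,
				       u16 rx_qid, u16 vport_id)
{
	p->pkt_hdr_addr.lo = cpu_to_le32(lower_32_bits(hdr_phys));
	p->pkt_hdr_addr.hi = cpu_to_le32(upper_32_bits(hdr_phys));
	p->pkt_hdr_length = cpu_to_le16(hdr_len);
	p->rx_qid = cpu_to_le16(rx_qid);
	p->rx_qid_valid = 1;	/* steer by queue; action/flow ids unused */
	p->action_icid_valid = 0;
	p->flow_id_valid = 0;
	p->vport_id = cpu_to_le16(vport_id);
	p->filter_action = GFT_ADD_FILTER;
	p->assert_on_error = 0;
}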
/* Ramrod data for rx queue start ramrod */
@@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+ __le16 update_qm_pq_id_flg;
+ __le16 qm_pq_id;
+ __le32 reserved0;
+ struct regpair reserved1[5];
+};
+
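The tx_queue_update ramrod, paired with the new ETH_RAMROD_TX_QUEUE_UPDATE and ETH_EVENT_TX_QUEUE_UPDATE opcodes above, lets an active TX queue be re-mapped to a different QM physical queue. A minimal, hypothetical fill of its data (the ramrod send path is omitted):

/* Sketch only: request a QM PQ re-map for an active TX queue. */
static void txq_update_fill_sketch(struct tx_queue_update_ramrod_data *p,
				   u16 new_qm_pq_id)
{
	p->update_qm_pq_id_flg = cpu_to_le16(1);
	p->qm_pq_id = cpu_to_le16(new_qm_pq_id);
}
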
/* Ramrod data for vport update ramrod */
struct vport_filter_update_ramrod_data {
struct eth_filter_cmd_header filter_cmd_hdr;
@@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct xstorm_eth_conn_agctxdq_ext_ldpart {
+struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
u8 flags3;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
u8 flags4;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
u8 flags5;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
u8 flags6;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
__le32 reg4;
};
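/* Illustrative sketch, not part of the patch: every flagsN byte in these
 * aggregative-context structures is carved into sub-byte fields by the
 * paired *_MASK/*_SHIFT defines and is meant to be accessed through qed's
 * token-pasting helpers GET_FIELD()/SET_FIELD() from common_hsi.h. This
 * patch only prefixes the define names with E4 for the E4 chip variant;
 * the bit layout itself is unchanged. A minimal example against the
 * renamed defines (helper name hypothetical):
 */
static void
sketch_enable_dq_cf(struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart *p_part)
{
	/* flags10 bit 0 is DQ_CF_EN: SET_FIELD clears the field at
	 * <name>_SHIFT, then ors in the new value.
	 */
	SET_FIELD(p_part->flags10,
		  E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN, 1);

	/* reading back uses the same define name with GET_FIELD */
	WARN_ON(GET_FIELD(p_part->flags10,
			  E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN) != 1);
}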
-struct mstorm_eth_conn_ag_ctx {
+struct e4_mstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct xstorm_eth_hw_conn_ag_ctx {
+struct e4_xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
+/* GFT CAM line struct */
struct gft_cam_line {
__le32 camline;
#define GFT_CAM_LINE_VALID_MASK 0x1
@@ -5975,6 +6496,7 @@ struct gft_cam_line {
#define GFT_CAM_LINE_RESERVED1_SHIFT 29
};
+/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped {
__le32 camline;
#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
@@ -6008,28 +6530,31 @@ union gft_cam_line_union {
struct gft_cam_line_mapped cam_line_mapped;
};
+/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
GFT_PROFILE_IPV6 = 1,
MAX_GFT_PROFILE_IP_VERSION
};
+/* Profile key struct for GFT logic in Prs */
struct gft_profile_key {
__le16 profile_key;
-#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
-#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
-#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
-#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
-#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
-};
-
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+/* Used in gft_profile_key: Indication for tunnel type */
enum gft_profile_tunnel_type {
GFT_PROFILE_NO_TUNNEL = 0,
GFT_PROFILE_VXLAN_TUNNEL = 1,
@@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type {
MAX_GFT_PROFILE_TUNNEL_TYPE
};
+/* Used in gft_profile_key: Indication for protocol type */
enum gft_profile_upper_protocol_type {
GFT_PROFILE_ROCE_PROTOCOL = 0,
GFT_PROFILE_RROCE_PROTOCOL = 1,
@@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type {
MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
};
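/* Illustrative sketch, not part of the patch: a parser profile key is built
 * by packing the enums above into the 16-bit profile_key with SET_FIELD().
 * The helper below is hypothetical; a plain (non-tunneled) profile for a
 * given PF might be keyed like this:
 */
static __le16 sketch_gft_profile_key(enum gft_profile_ip_version ip_ver,
				     enum gft_profile_upper_protocol_type proto,
				     u8 pf_id)
{
	u16 key = 0;

	SET_FIELD(key, GFT_PROFILE_KEY_IP_VERSION, ip_ver);
	SET_FIELD(key, GFT_PROFILE_KEY_TUNNEL_TYPE, GFT_PROFILE_NO_TUNNEL);
	SET_FIELD(key, GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE, proto);
	SET_FIELD(key, GFT_PROFILE_KEY_PF_ID, pf_id);

	return cpu_to_le16(key);
}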
+/* GFT RAM line struct */
struct gft_ram_line {
__le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
@@ -6149,6 +6676,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_RESERVED1_SHIFT 10
};
+/* Used in the first 2 bits of gft_ram_line: Indication for vlan mask */
enum gft_vlan_select {
INNER_PROVIDER_VLAN = 0,
INNER_VLAN = 1,
@@ -6157,10 +6685,205 @@ enum gft_vlan_select {
MAX_GFT_VLAN_SELECT
};
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct e4_ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct e4_mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+/* The rdma task context of Mstorm */
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[2];
+};
+
+struct e4_ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 reg2;
+ __le32 dif_runt_value;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+/* RDMA task context */
+struct e4_rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
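The E4 context structures above are plain byte arrays from the compiler's point of view; each MASK/SHIFT pair describes a sub-field packed into one of the flagsN bytes. The qed driver reads and writes these through token-pasting helpers rather than C bitfields, which keeps the layout byte-exact against the firmware ABI regardless of compiler bitfield ordering. A minimal sketch of that access pattern follows — GET_FIELD/SET_FIELD in the driver's qed.h are of this general shape, but the bodies below are illustrative rather than the verbatim kernel macros, and example_set_rule5() is a hypothetical helper:

/* Extract a sub-field: shift it down, then mask it off. */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

/* Update a sub-field: clear its bits, then OR in the new value. */
#define SET_FIELD(value, name, flag)                                      \
	do {                                                              \
		(value) &= ~((name##_MASK) << (name##_SHIFT));            \
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT));  \
	} while (0)

/* Hypothetical usage against the Mstorm task aggregative context:
 * RULE5EN occupies bit 6 of flags2 (mask 0x1, shift 6 above).
 */
static void example_set_rule5(struct e4_mstorm_rdma_task_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags2, E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN, 1);
}
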
+/* rdma close function ramrod data */
struct rdma_close_func_ramrod_data {
u8 cnq_start_offset;
u8 num_cnqs;
@@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data {
u8 reserved[4];
};
+/* rdma function init CNQ parameters */
struct rdma_cnq_params {
__le16 sb_num;
u8 sb_index;
@@ -6179,6 +6903,7 @@ struct rdma_cnq_params {
u8 reserved1[6];
};
+/* rdma create cq ramrod data */
struct rdma_create_cq_ramrod_data {
struct regpair cq_handle;
struct regpair pbl_addr;
@@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data {
__le16 reserved1;
};
+/* rdma deregister tid ramrod data */
struct rdma_deregister_tid_ramrod_data {
__le32 itid;
__le32 reserved;
};
+/* rdma destroy cq output params */
struct rdma_destroy_cq_output_params {
__le16 cnq_num;
__le16 reserved0;
__le32 reserved1;
};
+/* rdma destroy cq ramrod data */
struct rdma_destroy_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* RDMA slow path EQ cmd IDs */
enum rdma_event_opcode {
RDMA_EVENT_UNUSED,
RDMA_EVENT_FUNC_INIT,
@@ -6223,6 +6952,7 @@ enum rdma_event_opcode {
MAX_RDMA_EVENT_OPCODE
};
+/* RDMA FW return code for slow path ramrods */
enum rdma_fw_return_code {
RDMA_RETURN_OK = 0,
RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
@@ -6232,20 +6962,24 @@ enum rdma_fw_return_code {
MAX_RDMA_FW_RETURN_CODE
};
+/* rdma function init header */
struct rdma_init_func_hdr {
u8 cnq_start_offset;
u8 num_cnqs;
u8 cq_ring_mode;
u8 vf_id;
u8 vf_valid;
- u8 reserved[3];
+ u8 relaxed_ordering;
+ u8 reserved[2];
};
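
Note the change just above: relaxed_ordering is carved out of what used to be a three-byte reserved pad, so rdma_init_func_hdr keeps its footprint (six u8 fields plus two reserved bytes, 8 bytes in total, with no implicit padding since every member is a u8) and the ramrod layout seen by the firmware is unchanged. A hedged illustration of that invariant, in the build-time-assert style the kernel uses elsewhere — this check is not in the source tree, and it assumes BUILD_BUG_ON from <linux/build_bug.h>:

static inline void rdma_init_func_hdr_size_check(void)
{
	/* All members are u8, so sizeof() is exactly the byte count. */
	BUILD_BUG_ON(sizeof(struct rdma_init_func_hdr) != 8);
}
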
+/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
+/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
RDMA_RAMROD_FUNC_INIT,
@@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id {
MAX_RDMA_RAMROD_CMD_ID
};
+/* rdma register tid ramrod data */
struct rdma_register_tid_ramrod_data {
__le16 flags;
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
u8 key;
u8 length_hi;
u8 vf_id;
@@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data {
__le32 reserved4[2];
};
+/* rdma resize cq output params */
struct rdma_resize_cq_output_params {
__le32 old_cq_cons;
__le32 old_cq_prod;
};
+/* rdma resize cq ramrod data */
struct rdma_resize_cq_ramrod_data {
u8 flags;
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
u8 pbl_log_page_size;
__le16 pbl_num_pages;
__le32 max_cqes;
@@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* rdma SRQ context */
struct rdma_srq_context {
struct regpair temp[8];
};
+/* rdma create SRQ ramrod data */
struct rdma_srq_create_ramrod_data {
struct regpair pbl_base_addr;
__le16 pages_in_srq_pbl;
@@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data {
struct regpair producers_addr;
};
+/* rdma destroy SRQ ramrod data */
struct rdma_srq_destroy_ramrod_data {
struct rdma_srq_id srq_id;
__le32 reserved;
};
+/* rdma modify SRQ ramrod data */
struct rdma_srq_modify_ramrod_data {
struct rdma_srq_id srq_id;
__le32 wqe_limit;
};
-struct ystorm_rdma_task_st_ctx {
- struct regpair temp[4];
-};
-
-struct ystorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 msem_ctx_upd_seq;
- u8 flags0;
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 key;
- __le32 mw_cnt;
- u8 ref_cnt_seq;
- u8 ctx_upd_seq;
- __le16 dif_flags;
- __le16 tx_ref_count;
- __le16 last_used_ltid;
- __le16 parent_mr_lo;
- __le16 parent_mr_hi;
- __le32 fbo_lo;
- __le32 fbo_hi;
-};
-
-struct mstorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 icid;
- u8 flags0;
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 key;
- __le32 mw_cnt;
- u8 ref_cnt_seq;
- u8 ctx_upd_seq;
- __le16 dif_flags;
- __le16 tx_ref_count;
- __le16 last_used_ltid;
- __le16 parent_mr_lo;
- __le16 parent_mr_hi;
- __le32 fbo_lo;
- __le32 fbo_hi;
-};
-
-struct ustorm_rdma_task_st_ctx {
- struct regpair temp[2];
-};
-
-struct ustorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 icid;
- u8 flags0;
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
- u8 flags1;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
- u8 flags2;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
- u8 flags3;
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
- __le32 dif_err_intervals;
- __le32 dif_error_1st_interval;
- __le32 reg2;
- __le32 dif_runt_value;
- __le32 reg4;
- __le32 reg5;
-};
-
-struct rdma_task_context {
- struct ystorm_rdma_task_st_ctx ystorm_st_context;
- struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
- struct tdif_task_context tdif_context;
- struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
- struct mstorm_rdma_task_st_ctx mstorm_st_context;
- struct rdif_task_context rdif_context;
- struct ustorm_rdma_task_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
- struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
-};
-
+/* RDMA Tid type enumeration (for register_tid ramrod) */
enum rdma_tid_type {
RDMA_TID_REGISTERED_MR,
RDMA_TID_FMR,
@@ -6556,214 +7108,214 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4;
};
-struct mstorm_rdma_conn_ag_ctx {
+struct e4_mstorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct tstorm_rdma_conn_ag_ctx {
+struct e4_tstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 byte1;
u8 flags0;
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx {
__le32 reg10;
};
-struct tstorm_rdma_task_ag_ctx {
+struct e4_tstorm_rdma_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx {
__le32 reg2;
};
-struct ustorm_rdma_conn_ag_ctx {
+struct e4_ustorm_rdma_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 conn_dpi;
@@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
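These _MASK/_SHIFT pairs are consumed through the GET_FIELD()/SET_FIELD() helpers in the qed common HSI headers, which paste the suffixes onto a field name. A minimal sketch using names from the e4_ustorm_rdma_conn_ag_ctx block above (the helper definitions are paraphrased from include/linux/qed/common_hsi.h and may differ in detail; the wrapper function is illustrative, not part of this patch):

    /* Paraphrased from include/linux/qed/common_hsi.h */
    #define GET_FIELD(value, name) \
            (((value) >> (name ## _SHIFT)) & name ## _MASK)
    #define SET_FIELD(value, name, flag)                            \
            do {                                                    \
                    (value) &= ~(name ## _MASK << name ## _SHIFT);  \
                    (value) |= (((u64)flag) << (name ## _SHIFT));   \
            } while (0)

    /* Hypothetical usage: enable the flush-Q0 CF bit in flags2 */
    static void example_enable_flush_q0(struct e4_ustorm_rdma_conn_ag_ctx *ctx)
    {
            SET_FIELD(ctx->flags2,
                      E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN, 1);
    }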
-struct xstorm_rdma_conn_ag_ctx {
+struct e4_xstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx {
__le32 reg6;
};
-struct ystorm_rdma_conn_ag_ctx {
+struct e4_ystorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx {
__le32 reg3;
};
-struct mstorm_roce_conn_st_ctx {
- struct regpair temp[6];
+/* The roce storm context of Ystorm */
+struct ystorm_roce_conn_st_ctx {
+ struct regpair temp[2];
};
+/* The roce storm context of Pstorm */
struct pstorm_roce_conn_st_ctx {
struct regpair temp[16];
};
-struct ystorm_roce_conn_st_ctx {
- struct regpair temp[2];
-};
-
+/* The roce storm context of Xstorm */
struct xstorm_roce_conn_st_ctx {
struct regpair temp[24];
};
+/* The roce storm context of Tstorm */
struct tstorm_roce_conn_st_ctx {
struct regpair temp[30];
};
+/* The roce storm context of Mstorm */
+struct mstorm_roce_conn_st_ctx {
+ struct regpair temp[6];
+};
+
+/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx {
struct regpair temp[12];
};
-struct roce_conn_context {
+/* roce connection context */
+struct e4_roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
- struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
struct mstorm_roce_conn_st_ctx mstorm_st_context;
struct ustorm_roce_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
};
+/* roce create qp requester ramrod data */
struct roce_create_qp_req_ramrod_data {
__le16 flags;
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
u8 max_ord;
u8 traffic_class;
u8 hop_limit;
@@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data {
__le16 dpi;
};
+/* roce create qp responder ramrod data */
struct roce_create_qp_resp_ramrod_data {
__le16 flags;
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
u8 max_ird;
u8 traffic_class;
u8 hop_limit;
@@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data {
__le16 dpi;
};
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+ struct regpair ecn_pkt_rcv;
+ struct regpair cnp_pkt_rcv;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+ struct regpair cnp_pkt_sent;
+};
+
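The two DCQCN statistics blocks above are plain regpair counters: 64-bit values split into little-endian lo/hi halves. A minimal sketch of folding them into host-endian u64 values, assuming the HILO_64_REGPAIR() helper from the qed common HSI headers (the function name and the debug print are illustrative only):

    /* Hypothetical: read the new DCQCN counters into host-endian u64s */
    static void example_read_dcqcn_stats(struct roce_dcqcn_received_stats *rx,
                                         struct roce_dcqcn_sent_stats *tx)
    {
            u64 ecn_rcv = HILO_64_REGPAIR(rx->ecn_pkt_rcv);
            u64 cnp_rcv = HILO_64_REGPAIR(rx->cnp_pkt_rcv);
            u64 cnp_sent = HILO_64_REGPAIR(tx->cnp_pkt_sent);

            pr_debug("ecn_rcv=%llu cnp_rcv=%llu cnp_sent=%llu\n",
                     ecn_rcv, cnp_rcv, cnp_sent);
    }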
+/* RoCE destroy qp requester output params */
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
__le32 cq_prod;
};
+/* RoCE destroy qp requester ramrod data */
struct roce_destroy_qp_req_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE destroy qp responder output params */
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
__le32 cq_prod;
};
+/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* roce special events statistics */
struct roce_events_stats {
__le16 silent_drops;
__le16 rnr_naks_sent;
@@ -7508,6 +8085,7 @@ struct roce_events_stats {
__le32 reserved;
};
+/* ROCE slow path EQ cmd IDs */
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
@@ -7518,6 +8096,7 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE
};
+/* roce func init parameters */
struct roce_init_func_params {
u8 ll2_queue_id;
u8 cnp_vlan_priority;
@@ -7526,42 +8105,46 @@ struct roce_init_func_params {
__le32 cnp_send_timeout;
};
+/* roce func init ramrod data */
struct roce_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
struct roce_init_func_params roce;
};
+/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
u8 fields;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
u8 max_ord;
u8 traffic_class;
u8 hop_limit;
@@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data {
__le32 ack_timeout_val;
__le16 mtu;
__le16 reserved2;
- __le32 reserved3[3];
+ __le32 reserved3[2];
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
__le32 src_gid[4];
__le32 dst_gid[4];
};
+/* roce modify qp responder ramrod data */
struct roce_modify_qp_resp_ramrod_data {
__le16 flags;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
u8 fields;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
u8 max_ird;
u8 traffic_class;
u8 hop_limit;
__le16 p_key;
__le32 flow_label;
__le16 mtu;
- __le16 reserved2;
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
+ u8 reserved2[6];
__le32 src_gid[4];
__le32 dst_gid[4];
};
+/* RoCE query qp requester output params */
struct roce_query_qp_req_output_params {
__le32 psn;
__le32 flags;
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
};
+/* RoCE query qp requester ramrod data */
struct roce_query_qp_req_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params {
__le32 psn;
__le32 err_flag;
@@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params {
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
};
+/* RoCE query qp responder ramrod data */
struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {
ROCE_RAMROD_CREATE_QP = 11,
ROCE_RAMROD_MODIFY_QP,
@@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id {
MAX_ROCE_RAMROD_CMD_ID
};
-struct mstorm_roce_req_conn_ag_ctx {
+struct e4_mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct mstorm_roce_resp_conn_ag_ctx {
+struct e4_mstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct tstorm_roce_req_conn_ag_ctx {
+struct e4_tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
u8 flags3;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
@@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx {
u8 byte4;
u8 byte5;
__le16 snd_sq_cons;
- __le16 word2;
+ __le16 conn_dpi;
__le16 word3;
__le32 reg9;
__le32 reg10;
};
-struct tstorm_roce_resp_conn_ag_ctx {
+struct e4_tstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 psn_and_rxmit_id_echo;
__le32 reg1;
__le32 reg2;
@@ -7935,63 +8530,63 @@ struct tstorm_roce_resp_conn_ag_ctx {
__le32 reg10;
};

-struct ustorm_roce_req_conn_ag_ctx {
+struct e4_ustorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx {
__le16 word3;
};

-struct ustorm_roce_resp_conn_ag_ctx {
+struct e4_ustorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx {
__le16 word3;
};

-struct xstorm_roce_req_conn_ag_ctx {
+struct e4_xstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
u8 flags13;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx {
__le32 orq_cons;
};

-struct xstorm_roce_resp_conn_ag_ctx {
+struct e4_xstorm_roce_resp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
- __le16 word1;
- __le16 irq_prod;
- __le16 word3;
- __le16 word4;
- __le16 ereserved1;
+ __le16 irq_prod_shadow;
+ __le16 word2;
__le16 irq_cons;
+ __le16 irq_prod;
+ __le16 e5_reserved1;
+ __le16 conn_dpi;
u8 rxmit_opcode;
u8 byte4;
u8 byte5;
@@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
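
Each flagsN byte in these aggregative-context structs packs several one- and two-bit fields, and every field carries a matching _MASK/_SHIFT pair for extraction and insertion. As a minimal sketch of how such pairs are consumed — the GET_FIELD/SET_FIELD helpers below follow the token-pasting style the qed HSI headers use, but their exact definitions and the demo function are illustrative, not taken from this patch:

/* Illustrative helpers in the style of the qed HSI headers (assumed). */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((name##_MASK) << (name##_SHIFT)); \
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
	} while (0)

/* Hypothetical demo: CF23 occupies bits 7:6 of flags14 above, and
 * SLOW_PATH_EN is bit 4 of flags10.
 */
static void e4_xstorm_roce_resp_flags_demo(struct e4_xstorm_roce_resp_conn_ag_ctx *ctx)
{
	u8 cf23 = GET_FIELD(ctx->flags14, E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23);

	if (cf23)
		SET_FIELD(ctx->flags10,
			  E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN, 1);
}
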
-struct ystorm_roce_req_conn_ag_ctx {
+struct e4_ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx {
__le32 reg3;
};
-struct ystorm_roce_resp_conn_ag_ctx {
+struct e4_ystorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx {
__le32 reg3;
};
+/* Roce doorbell data */
enum roce_flavor {
PLAIN_ROCE,
RROCE_IPV4,
@@ -8628,228 +9224,231 @@ enum roce_flavor {
MAX_ROCE_FLAVOR
};
+/* The iwarp storm context of Ystorm */
struct ystorm_iwarp_conn_st_ctx {
__le32 reserved[4];
};
+/* The iwarp storm context of Pstorm */
struct pstorm_iwarp_conn_st_ctx {
__le32 reserved[36];
};
+/* The iwarp storm context of Xstorm */
struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[44];
};
-struct xstorm_iwarp_conn_ag_ctx {
+struct e4_xstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx {
__le32 reg17;
};
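
Every multi-byte member of these contexts is declared __le16/__le32: the storm firmware reads the context in little-endian layout regardless of host byte order, so the driver converts at each access. A sketch under that assumption (function names are hypothetical; cpu_to_le16/le16_to_cpu are the standard kernel byteorder helpers):

static void e4_xstorm_iwarp_set_q0(struct e4_xstorm_iwarp_conn_ag_ctx *ctx,
				   u16 qnum)
{
	/* host order -> LE before the value lands in the context */
	ctx->physical_q0 = cpu_to_le16(qnum);
}

static u16 e4_xstorm_iwarp_get_q0(const struct e4_xstorm_iwarp_conn_ag_ctx *ctx)
{
	/* LE -> host order on read-back */
	return le16_to_cpu(ctx->physical_q0);
}
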
-struct tstorm_iwarp_conn_ag_ctx {
+struct e4_tstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 unaligned_nxt_seq;
@@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx {
__le32 last_hq_sequence;
};
+/* The iwarp storm context of Tstorm */
struct tstorm_iwarp_conn_st_ctx {
__le32 reserved[60];
};
+/* The iwarp storm context of Mstorm */
struct mstorm_iwarp_conn_st_ctx {
__le32 reserved[32];
};
+/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx {
__le32 reserved[24];
};
-struct iwarp_conn_context {
+/* iwarp connection context */
+struct e4_iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
- struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
};
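
The connection context concatenates the per-storm state (st) and aggregative (ag) contexts in a fixed order, with regpair padding after several sections, so the aggregate should stay 8-byte granular. A compile-time sanity check in that spirit — a sketch only, not something this patch adds:

static inline void e4_iwarp_conn_context_layout_check(void)
{
	/* regpair padding keeps each storm section 8-byte aligned,
	 * so the aggregate size must be a multiple of 8.
	 */
	BUILD_BUG_ON(sizeof(struct e4_iwarp_conn_context) % 8);
}
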
+/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
struct iwarp_create_qp_ramrod_data {
u8 flags;
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
u8 reserved1;
__le16 pd;
__le16 sq_num_pages;
@@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data {
u8 reserved2[6];
};
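
The create-QP ramrod carries the per-QP capabilities in the single flags byte defined above; a sketch of filling it with the SET_FIELD helper from the earlier sketch (function name hypothetical):

static void iwarp_fill_create_qp_flags(struct iwarp_create_qp_ramrod_data *p,
				       bool rd_en, bool wr_en,
				       bool atomic_en, bool srq)
{
	u8 flags = 0;

	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, rd_en ? 1 : 0);
	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, wr_en ? 1 : 0);
	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, atomic_en ? 1 : 0);
	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, srq ? 1 : 0);

	/* single byte, no endianness conversion needed */
	p->flags = flags;
}
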
+/* iWARP completion queue types */
enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
@@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion {
u8 reserved[5];
};
+/* iWARP completion queue types */
enum iwarp_eqe_sync_opcode {
IWARP_EVENT_TYPE_TCP_OFFLOAD =
11,
@@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode {
MAX_IWARP_EQE_SYNC_OPCODE
};
+/* iWARP EQE completion status */
enum iwarp_fw_return_code {
IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
IWARP_CONN_ERROR_TCP_CONNECTION_RST,
@@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code {
MAX_IWARP_FW_RETURN_CODE
};
+/* unaligned opaque data received from LL2 */
struct iwarp_init_func_params {
u8 ll2_ooo_q_index;
u8 reserved1[7];
};
+/* iwarp func init ramrod data */
struct iwarp_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
struct tcp_init_params tcp;
struct iwarp_init_func_params iwarp;
};
+/* iWARP QP - possible states to transition to */
enum iwarp_modify_qp_new_state_type {
IWARP_MODIFY_QP_STATE_CLOSING = 1,
- IWARP_MODIFY_QP_STATE_ERROR =
- 2,
+ IWARP_MODIFY_QP_STATE_ERROR = 2,
MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
};
+/* iwarp modify qp responder ramrod data */
struct iwarp_modify_qp_ramrod_data {
__le16 transition_to_state;
__le16 flags;
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
-#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
__le32 reserved3[3];
__le32 reserved4[8];
};
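
Unlike the u8 flags byte in the create ramrod, flags here is __le16, so the bitfield is best built in a host-order local and converted once. A sketch of requesting a transition to the error state (function name hypothetical):

static void iwarp_fill_modify_to_error(struct iwarp_modify_qp_ramrod_data *p)
{
	u16 flags = 0;

	SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 1);
	p->flags = cpu_to_le16(flags);
	p->transition_to_state = cpu_to_le16(IWARP_MODIFY_QP_STATE_ERROR);
}
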
+/* MPA params for Enhanced mode */
struct mpa_rq_params {
__le32 ird;
__le32 ord;
};
+/* MPA host Address-Len for private data */
struct mpa_ulp_buffer {
struct regpair addr;
__le16 len;
__le16 reserved[3];
};
+/* iWARP MPA offload params common to Basic and Enhanced modes */
struct mpa_outgoing_params {
u8 crc_needed;
u8 reject;
@@ -9181,6 +9794,9 @@ struct mpa_outgoing_params {
struct mpa_ulp_buffer outgoing_ulp_buffer;
};
+/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
+ * Ramrod.
+ */
struct iwarp_mpa_offload_ramrod_data {
struct mpa_outgoing_params common;
__le32 tcp_cid;
@@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data {
u8 tcp_connect_side;
u8 rtr_pref;
#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
u8 reserved2;
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
struct regpair shared_queue_addr;
+ __le16 rcv_wnd;
u8 stats_counter_id;
- u8 reserved3[15];
+ u8 reserved3[13];
};
+/* iWARP TCP connection offload params passed by driver to FW */
struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
@@ -9211,22 +9829,24 @@ struct iwarp_offload_params {
u8 reserved[10];
};
+/* iWARP query QP output params */
struct iwarp_query_qp_output_params {
__le32 flags;
#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
u8 reserved1[4];
};
+/* iWARP query QP ramrod data */
struct iwarp_query_qp_ramrod_data {
struct regpair output_params_addr;
};
+/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {
- IWARP_RAMROD_CMD_ID_TCP_OFFLOAD =
- 11,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id {
MAX_IWARP_RAMROD_CMD_ID
};
+/* Per PF iWARP retransmit path statistics */
struct iwarp_rxmit_stats_drv {
struct regpair tx_go_to_slow_start_event_cnt;
struct regpair tx_fast_retransmit_event_cnt;
};
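
Both retransmit counters are struct regpair, i.e. two __le32 halves; folding one into a host u64 looks roughly like this (the lo/hi member names follow the usual common_hsi regpair layout and are an assumption here):

static u64 iwarp_regpair_to_u64(struct regpair r)
{
	/* low word in r.lo, high word in r.hi, both little-endian */
	return ((u64)le32_to_cpu(r.hi) << 32) | le32_to_cpu(r.lo);
}

/* e.g.: u64 fast_rxmit = iwarp_regpair_to_u64(stats->tx_fast_retransmit_event_cnt); */
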
+/* iWARP and TCP connection offload params passed by driver to FW in iWARP
+ * offload ramrod.
+ */
struct iwarp_tcp_offload_ramrod_data {
struct iwarp_offload_params iwarp;
struct tcp_offload_params_opt2 tcp;
};
+/* iWARP MPA negotiation types */
enum mpa_negotiation_mode {
MPA_NEGOTIATION_TYPE_BASIC = 1,
MPA_NEGOTIATION_TYPE_ENHANCED = 2,
MAX_MPA_NEGOTIATION_MODE
};
+/* iWARP MPA Enhanced mode RTR types */
enum mpa_rtr_type {
MPA_RTR_TYPE_NONE = 0,
MPA_RTR_TYPE_ZERO_SEND = 1,
@@ -9264,113 +9890,114 @@ enum mpa_rtr_type {
MAX_MPA_RTR_TYPE
};
+/* unaligned opaque data received from LL2 */
struct unaligned_opaque_data {
__le16 first_mpa_offset;
u8 tcp_payload_offset;
u8 flags;
#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1
-#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
-#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
-#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
__le32 cid;
};
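
The opaque data travels from LL2 as a packed cookie; unpacking it is a straight combination of byteorder conversion for the multi-byte members and GET_FIELD for the flag bits (helper name hypothetical, GET_FIELD as sketched earlier):

static bool iwarp_ll2_unpack_opaque(const struct unaligned_opaque_data *d,
				    u32 *cid, u16 *first_mpa_offset)
{
	*cid = le32_to_cpu(d->cid);
	*first_mpa_offset = le16_to_cpu(d->first_mpa_offset);

	/* flags is a plain u8, so the bits can be read directly */
	return GET_FIELD(d->flags, UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED);
}
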
-struct mstorm_iwarp_conn_ag_ctx {
+struct e4_mstorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 rcq_cons;
__le16 rcq_cons_th;
__le32 reg0;
__le32 reg1;
};
-struct ustorm_iwarp_conn_ag_ctx {
+struct e4_ustorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx {
__le16 word3;
};
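
The rename running through these hunks prefixes the per-chip aggregative-context definitions with E4_ (the current ASIC generation), apparently to keep the unprefixed namespace free for a later chip generation; the MASK/SHIFT pairs themselves are unchanged. Those pairs are consumed through a generic accessor idiom. The sketch below is a minimal standalone version patterned after the GET_FIELD/SET_FIELD helpers qed keeps in drivers/net/ethernet/qlogic/qed/qed.h; the macro bodies, the host-endian types, and the demo values are illustrative assumptions, not quoted kernel code.

	#include <assert.h>
	#include <stdint.h>

	/* Stand-in for the qed GET_FIELD/SET_FIELD helpers; the real
	 * macros live in qed.h, this version is for illustration only. */
	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & (name##_MASK))

	#define SET_FIELD(value, name, val)				\
		do {							\
			(value) &= ~((name##_MASK) << (name##_SHIFT));	\
			(value) |= (((uint64_t)(val) & (name##_MASK))	\
				    << (name##_SHIFT));			\
		} while (0)

	/* One MASK/SHIFT pair copied from the hunk above. */
	#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK	0x3
	#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT	4

	int main(void)
	{
		uint8_t flags1 = 0;

		/* Set the 2-bit CQ_ARM_CF field (bits 4-5) to 2. */
		SET_FIELD(flags1, E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF, 2);
		assert(flags1 == 0x20);
		assert(GET_FIELD(flags1,
				 E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF) == 2);
		return 0;
	}

Because every field is addressed through its name##_MASK / name##_SHIFT pair, the E4_ rename only has to touch the definitions; call sites that pass the macro name prefix are updated mechanically.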
-struct ystorm_iwarp_conn_ag_ctx {
+struct e4_ystorm_iwarp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx {
__le32 reg3;
};
+/* The fcoe storm context of Ystorm */
struct ystorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx {
struct regpair reserved;
__le16 min_frame_size;
u8 protection_info_flags;
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
u8 dst_protection_per_mss;
u8 src_protection_per_mss;
u8 ptu_log_page_size;
u8 flags;
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
u8 fcp_xfer_size;
};
+/* FCoE 16-bits vlan structure */
struct fcoe_vlan_fields {
__le16 fields;
-#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
};
+/* FCoE 16-bits vlan union */
union fcoe_vlan_field_union {
struct fcoe_vlan_fields fields;
__le16 val;
};
+/* FCoE 16-bits vlan, vif union */
union fcoe_vlan_vif_field_union {
union fcoe_vlan_field_union vlan;
__le16 vif;
};
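
The two unions above let firmware-facing code view the same 16-bit word either as a raw __le16 (a VLAN value or a VIF) or as packed VID/CLI/PRI fields. A host-endian decoding sketch, where uint16_t stands in for __le16 and 0xA064 is an invented sample value:

	#include <stdint.h>
	#include <stdio.h>

	/* Host-endian stand-ins for the kernel structures above. */
	struct fcoe_vlan_fields {
		uint16_t fields;  /* VID: bits 0-11, CLI: bit 12, PRI: bits 13-15 */
	};

	union fcoe_vlan_field_union {
		struct fcoe_vlan_fields fields;
		uint16_t val;
	};

	int main(void)
	{
		union fcoe_vlan_field_union u = { .val = 0xA064 };

		printf("vid=%u cli=%u pri=%u\n",
		       u.fields.fields & 0xFFF,		/* 100 */
		       (u.fields.fields >> 12) & 0x1,	/* 0   */
		       (u.fields.fields >> 13) & 0x7);	/* 5   */
		return 0;
	}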
+/* Ethernet context section */
struct pstorm_fcoe_eth_context_section {
u8 remote_addr_3;
u8 remote_addr_2;
@@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section {
__le16 inner_eth_type;
};
+/* The fcoe storm context of Pstorm */
struct pstorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx {
u8 sid_1;
u8 sid_0;
u8 flags;
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5
u8 did_2;
u8 did_1;
u8 did_0;
@@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx {
u8 reserved1;
};
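
The flags change in this struct carves a new SINGLE_VLAN_FLAG out of bit 4, so the reserved field shrinks from four bits at shift 4 to three bits at shift 5 and the byte stays fully packed (five 1-bit flags plus 3 reserved bits = 8). A compile-time sketch of that accounting; the field widths are the popcounts of the masks above, and the enum names are hypothetical:

	#include <assert.h>

	/* Bit budget of the pstorm fcoe flags byte after this hunk. */
	enum {
		VNTAG_VLAN_BITS		= 1,	/* bit 0 */
		SUPPORT_REC_RR_TOV_BITS	= 1,	/* bit 1 */
		INNER_VLAN_FLAG_BITS	= 1,	/* bit 2 */
		OUTER_VLAN_FLAG_BITS	= 1,	/* bit 3 */
		SINGLE_VLAN_FLAG_BITS	= 1,	/* bit 4, new in this hunk */
		RESERVED_BITS		= 3,	/* bits 5-7, was 4 bits at shift 4 */
	};

	static_assert(VNTAG_VLAN_BITS + SUPPORT_REC_RR_TOV_BITS +
		      INNER_VLAN_FLAG_BITS + OUTER_VLAN_FLAG_BITS +
		      SINGLE_VLAN_FLAG_BITS + RESERVED_BITS == 8,
		      "pstorm fcoe flags byte must remain fully packed");

	int main(void)
	{
		return 0;
	}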
+/* The fcoe storm context of Xstorm */
struct xstorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 src_mac_index;
@@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx {
u8 cached_wqes_avail;
__le16 stat_ram_addr;
u8 flags;
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
u8 cached_wqes_offset;
u8 reserved2;
u8 eth_hdr_size;
@@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx {
u8 fcp_cmd_byte_credit;
u8 fcp_rsp_byte_credit;
__le16 protection_info;
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
__le16 xferq_pbl_next_index;
__le16 page_size;
u8 mid_seq;
@@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx {
struct fcoe_wqe cached_wqes[16];
};
-struct xstorm_fcoe_conn_ag_ctx {
+struct e4_xstorm_fcoe_conn_ag_ctx {
u8 reserved0;
- u8 fcoe_state;
+ u8 state;
u8 flags0;
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
u8 flags2;
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
u8 flags7;
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
u8 flags12;
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx {
__le32 reg8;
};
+/* The fcoe storm context of Ustorm */
struct ustorm_fcoe_conn_st_ctx {
struct regpair respq_pbl_addr;
__le16 num_pages_in_pbl;
@@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx {
u8 reserved[2];
};
-struct tstorm_fcoe_conn_ag_ctx {
+struct e4_tstorm_fcoe_conn_ag_ctx {
u8 reserved0;
- u8 fcoe_state;
+ u8 state;
u8 flags0;
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
};
-struct ustorm_fcoe_conn_ag_ctx {
+struct e4_ustorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx {
__le16 word3;
};
+/* The fcoe storm context of Tstorm */
struct tstorm_fcoe_conn_st_ctx {
__le16 stat_ram_addr;
__le16 rx_max_fc_payload_len;
__le16 e_d_tov_val;
u8 flags;
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
u8 timers_cleanup_invocation_cnt;
__le32 reserved1[2];
- __le32 dst_mac_address_bytes0to3;
- __le16 dst_mac_address_bytes4to5;
+ __le32 dst_mac_address_bytes_0_to_3;
+ __le16 dst_mac_address_bytes_4_to_5;
__le16 ramrod_echo;
u8 flags1;
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
- u8 q_relative_offset;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
+ u8 cq_relative_offset;
+ u8 cmdq_relative_offset;
u8 bdq_resource_id;
- u8 reserved0[5];
+ u8 reserved0[4];
};
-struct mstorm_fcoe_conn_ag_ctx {
+struct e4_mstorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
+/* Fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
__le16 xfer_prod;
- __le16 reserved1;
+ u8 num_cqs;
+ u8 reserved1;
u8 protection_info;
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
@@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
u8 reserved2[2];
};
+/* Non fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
__le16 conn_id;
__le16 stat_ram_addr;
@@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
struct regpair reserved2[3];
};
+/* The fcoe storm context of Mstorm */
struct mstorm_fcoe_conn_st_ctx {
struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
};
-struct fcoe_conn_context {
+/* fcoe connection context */
+struct e4_fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
- struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
struct regpair xstorm_ag_padding[6];
struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
- struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
};
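
The connection context above interleaves per-storm state ("st") contexts and aggregative ("ag") contexts, with regpair padding members keeping each storm's region at the sizes the firmware expects. regpair itself is the HSI's basic two-dword unit (declared as a pair of __le32 halves in common_hsi.h); a host-endian stand-in:

	#include <assert.h>
	#include <stdint.h>

	/* Host-endian stand-in for the HSI regpair: an 8-byte, two-dword
	 * unit, used in the context above purely as padding granularity. */
	struct regpair {
		uint32_t lo;
		uint32_t hi;
	};

	static_assert(sizeof(struct regpair) == 8,
		      "regpair is the 8-byte HSI unit");

	int main(void)
	{
		return 0;
	}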
+/* FCoE connection offload params passed by driver to FW in FCoE offload
+ * ramrod.
+ */
struct fcoe_conn_offload_ramrod_params {
struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
};
+/* FCoE connection terminate params passed by driver to FW in FCoE terminate
+ * conn ramrod.
+ */
struct fcoe_conn_terminate_ramrod_params {
struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
};
+/* FCoE event type */
enum fcoe_event_type {
FCOE_EVENT_INIT_FUNC,
FCOE_EVENT_DESTROY_FUNC,
@@ -10129,10 +10780,12 @@ enum fcoe_event_type {
MAX_FCOE_EVENT_TYPE
};
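
As in the other HSI enums, the MAX_* tail entry doubles as the enum's cardinality, which is the usual way these firmware interfaces get cheap bounds checks on event and command codes. A standalone sketch; the validator function is a hypothetical illustration, not a function from the driver:

	#include <stdio.h>

	/* Standalone copy of the enum shape above, with the middle
	 * event codes elided. */
	enum fcoe_event_type {
		FCOE_EVENT_INIT_FUNC,
		FCOE_EVENT_DESTROY_FUNC,
		/* ... remaining event codes elided ... */
		MAX_FCOE_EVENT_TYPE
	};

	static int fcoe_event_valid(unsigned int ev)
	{
		return ev < MAX_FCOE_EVENT_TYPE;	/* sentinel == count */
	}

	int main(void)
	{
		printf("%d %d\n",
		       fcoe_event_valid(FCOE_EVENT_INIT_FUNC),	/* 1 */
		       fcoe_event_valid(MAX_FCOE_EVENT_TYPE));	/* 0 */
		return 0;
	}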
+/* FCoE init params passed by driver to FW in FCoE init ramrod */
struct fcoe_init_ramrod_params {
struct fcoe_init_func_ramrod_data init_ramrod_data;
};
+/* FCoE ramrod Command IDs */
enum fcoe_ramrod_cmd_id {
FCOE_RAMROD_CMD_ID_INIT_FUNC,
FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
@@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id {
MAX_FCOE_RAMROD_CMD_ID
};
+/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod.
+ */
struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
-struct ystorm_fcoe_conn_ag_ctx {
+struct e4_ystorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx {
__le32 reg3;
};
+/* The iscsi storm connection context of Ystorm */
struct ystorm_iscsi_conn_st_ctx {
- __le32 reserved[4];
+ __le32 reserved[8];
};
+/* Combined iSCSI and TCP storm connection of Pstorm */
struct pstorm_iscsi_tcp_conn_st_ctx {
__le32 tcp[32];
__le32 iscsi[4];
};
+/* The combined tcp and iscsi storm context of Xstorm */
struct xstorm_iscsi_tcp_conn_st_ctx {
- __le32 reserved_iscsi[40];
__le32 reserved_tcp[4];
+ __le32 reserved_iscsi[44];
};
-struct xstorm_iscsi_conn_ag_ctx {
+struct e4_xstorm_iscsi_conn_ag_ctx {
u8 cdu_validation;
u8 state;
u8 flags0;
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
u8 flags2;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
u8 flags6;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 ereserved;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx {
__le32 reg17;
};
-struct tstorm_iscsi_conn_ag_ctx {
+struct e4_tstorm_iscsi_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx {
__le16 word0;
};
-struct ustorm_iscsi_conn_ag_ctx {
+struct e4_ustorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx {
__le16 word3;
};
+/* The iscsi storm connection context of Tstorm */
struct tstorm_iscsi_conn_st_ctx {
- __le32 reserved[40];
+ __le32 reserved[44];
};
-struct mstorm_iscsi_conn_ag_ctx {
+struct e4_mstorm_iscsi_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
+/* Combined iSCSI and TCP storm connection of Mstorm */
struct mstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_tcp[20];
- __le32 reserved_iscsi[8];
+ __le32 reserved_iscsi[12];
};
+/* The iscsi storm context of Ustorm */
struct ustorm_iscsi_conn_st_ctx {
__le32 reserved[52];
};
-struct iscsi_conn_context {
+/* iscsi connection context */
+struct e4_iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct pb_context xpb2_context;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
- struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
struct pb_context upb_context;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};
+/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
struct iscsi_init_ramrod_params {
struct iscsi_spe_func_init iscsi_init_spe;
struct tcp_init_params tcp_init;
};
-struct ystorm_iscsi_conn_ag_ctx {
+struct e4_ystorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11613,7 +12276,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a05feb38c6ee..fca2dbd93ad9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase)
+{
+ u32 size = PAGE_SIZE / 2, val;
+ struct qed_dmae_params params;
+ int rc = 0;
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 *p_tmp;
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ 2 * size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: failed to allocate memory\n",
+ phase);
+ return -ENOMEM;
+ }
+
+ /* Fill the bottom half of the allocated memory with a known pattern */
+ for (p_tmp = (u32 *)p_virt;
+ p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+ /* Save the address itself as the value */
+ val = (u32)(uintptr_t)p_tmp;
+ *p_tmp = val;
+ }
+
+ /* Zero the top half of the allocated memory */
+ memset((u8 *)p_virt + size, 0, size);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+ phase,
+ (u64)p_phys,
+ p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+ memset(&params, 0, sizeof(params));
+ rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+ size / 4 /* size_in_dwords */, &params);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+ phase, rc);
+ goto out;
+ }
+
+ /* Verify that the top half of the allocated memory has the pattern */
+ for (p_tmp = (u32 *)((u8 *)p_virt + size);
+ p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+ /* The corresponding address in the bottom half */
+ val = (u32)(uintptr_t)p_tmp - size;
+
+ if (*p_tmp != val) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ phase,
+ (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+ p_tmp, *p_tmp, val);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+ return rc;
+}
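
The new sanity test is self-verifying: every dword in the bottom half of the buffer holds (the low 32 bits of) its own address, so after the copy each dword in the top half must equal its own address minus the half-buffer size, i.e. the address of its source counterpart. A host-only model of the same pattern, as a minimal sketch assuming nothing beyond the C standard library (malloc stands in for dma_alloc_coherent() and memcpy for the qed_dmae_host2host() transfer):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		size_t size = 4096 / 2;	/* half of one 4KiB page, as in the driver */
		uint8_t *buf = malloc(2 * size);
		uint32_t *p;
		int rc = 0;

		if (!buf)
			return 1;

		/* Bottom half: each dword stores its own (truncated) address */
		for (p = (uint32_t *)buf; p < (uint32_t *)(buf + size); p++)
			*p = (uint32_t)(uintptr_t)p;
		memset(buf + size, 0, size);

		memcpy(buf + size, buf, size);	/* models the DMAE copy */

		/* Top half: each dword must match its source counterpart */
		for (p = (uint32_t *)(buf + size);
		     p < (uint32_t *)(buf + 2 * size); p++) {
			uint32_t expected = (uint32_t)(uintptr_t)p - (uint32_t)size;

			if (*p != expected) {
				fprintf(stderr, "mismatch at %p\n", (void *)p);
				rc = 1;
				break;
			}
		}

		free(buf);
		return rc;
	}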
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index f2505c691c26..8db2839a8ec8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -299,4 +299,8 @@ union qed_qm_pq_params {
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase);
+
#endif
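
With the prototype exported here, a caller sketch (the phase string is illustrative; this patch does not show the actual call sites):

	rc = qed_dmae_sanity(p_hwfn, p_ptt, "init phase");
	if (rc)
		return rc;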
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index b069ad088269..18fb5062a83d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,6 +31,7 @@
*/
#include <linux/types.h>
+#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -40,102 +41,197 @@
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+ {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+ {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID 0xffff
+#define QM_INVALID_PQ_ID 0xffff
+
/* Feature enable */
-#define QM_BYPASS_EN 1
-#define QM_BYTE_CRD_EN 1
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
/* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
+#define QM_OTHER_PQS_PER_PF 4
+
/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 62500000
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 43750000
+
+/* Upper bound in bytes: 10 * the burst size of 1ms at 50Gbps (62.5MB) */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
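+/* With the 62500000 upper bound above, this evaluates to 43750000 */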
/* RL constants */
-#define QM_RL_UPPER_BOUND 62500000
-#define QM_RL_PERIOD 5 /* in us */
-#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL 43750000
-#define QM_RL_INC_VAL(rate) max_t(u32, \
- (u32)(((rate ? rate : \
- 1000000) * \
- QM_RL_PERIOD * \
- 101) / (8 * 100)), 1)
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in Mbps */
+#define QM_RL_INC_VAL(rate) ({ \
+ typeof(rate) __rate = (rate); \
+ max_t(u32, \
+ (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+ (8 * 100)), \
+ 1); })
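+/* Example: QM_RL_INC_VAL(25000) for a 25Gbps rate gives
+ * (25000 * 5 * 101) / 800 = 15781 bytes per 5us period:
+ * 25000 bits/us * 5us = 15625 bytes, plus a 1% margin.
+ */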
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND 62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
+ QM_RL_INC_VAL(speed), \
+ 9700 + 1000))
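+/* (9700 is the jumbo packet size in bytes, cf. BTB_JUMBO_PKT_BLOCKS) */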
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
+
/* AFullOprtnstcCrdMask constants */
-#define QM_OPPOR_LINE_VOQ_DEF 1
-#define QM_OPPOR_FW_STOP_DEF 0
-#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+
/* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
- (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
- PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
- (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
- PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
- 4) * \
- 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+ ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
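+/* Example: 150 command queue lines yield a line credit of
+ * (150 - 4) * 2 = 292, with the register sign bit set on top.
+ */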
+
/* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS 38
-#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
-#define BTB_PURE_LB_FACTOR 10
-#define BTB_PURE_LB_RATIO 7
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR 10
+
+/* Pure LB ratio, pre-scaled by BTB_PURE_LB_FACTOR (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 2
-#define QM_STOP_CMD_STRUCT_SIZE 2
-#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
-#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
- _STRUCT_SIZE
-#define QM_CMD_SET_FIELD(var, cmd, field, \
- value) SET_FIELD(var[cmd ## _ ## field ## \
- _OFFSET], \
- cmd ## _ ## field, \
- value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
- (max_phys_tcs_per_port) + \
- (tc))
-#define LB_VOQ(port) ( \
- MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phy_tcs_pr_port) \
- ((tc) < \
- LB_TC ? PHYS_VOQ(port, \
- tc, \
- max_phy_tcs_pr_port) \
- : LB_VOQ(port))
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+ SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+ cmd ## _ ## field, \
+ value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+ ext_voq, wrr) \
+ do { \
+ typeof(map) __map; \
+ memset(&__map, 0, sizeof(__map)); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
+ rl_valid); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
+ vp_pq_id); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
+ SET_FIELD(__map.reg, \
+ QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+ *((u32 *)&__map)); \
+ (map) = __map; \
+ } while (0)
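+/* The chip argument is token-pasted into the QM_RF_PQ_MAP_<chip>_* field
+ * names, so one macro body serves multiple register layouts; fields are
+ * assembled in the local __map scratch and written out once at the end.
+ */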
+
+#define WRITE_PQ_INFO_TO_RAM 1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+ ((rl_valid) << 22) | ((rl) << 24))
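+/* This packs one PQ descriptor into 32 bits: vp[11:0], pf[15:12],
+ * tc[19:16], port[21:20], rl_valid[22] (bit 23 unused), rl[31:24].
+ */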
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+
/******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+ u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+ else
+ return port_id * max_phys_tcs_per_port + tc;
+}
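+/* Example: with max_phys_tcs_per_port = 4, port 1 / tc 2 maps to
+ * external VOQ 1 * 4 + 2 = 6; each port's pure LB TC maps past all
+ * physical VOQs instead.
+ */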
+
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
/* Enable RLs for all VOQs */
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
- (1 << MAX_NUM_VOQS) - 1);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (u32)voq_bit_mask);
+ if (num_ext_voqs >= 32)
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+ (u32)(voq_bit_mask >> 32));
+
/* Write RL period */
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_PF_RL_UPPER_BOUND);
}
}
@@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_VP_RL_BYPASS_THRESH_SPEED);
}
}
@@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
* the specified VOQ.
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
- u8 voq, u16 cmdq_lines)
+ u8 ext_voq, u16 cmdq_lines)
{
- u32 qm_line_crd;
+ u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
(u32)cmdq_lines);
- STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
- STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
qm_line_crd);
}
@@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init(
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+
+ /* Clear PBF lines of all VOQs */
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
- /* Clear PBF lines for all VOQs */
- for (voq = 0; voq < MAX_NUM_VOQS; voq++)
- STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- u16 phys_lines, phys_lines_per_tc;
-
- /* find #lines to divide between active phys TCs */
- phys_lines = port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
- /* find #lines per active physical TC */
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- }
+ u16 phys_lines, phys_lines_per_tc;
- phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) != 1)
- continue;
+ if (!port_params[port_id].active)
+ continue;
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
- phys_lines_per_tc);
- }
+		/* Find the number of command queue lines to divide between
+		 * the active physical TCs. In E5, 1/8 of the lines are
+		 * reserved; the lines for the pure LB TC are subtracted.
+		 */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
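+		/* Example: 3350 total lines with 150 reserved for pure LB
+		 * and 4 active TCs leave (3350 - 150) / 4 = 800 lines per TC.
+		 */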
- /* init registers for pure LB TC */
- qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
- PBF_CMDQ_PURE_LB_LINES);
+ /* Init registers per active TC */
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc, max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ qed_cmdq_lines_voq_rt_init(p_hwfn,
+ ext_voq,
+ phys_lines_per_tc);
}
+
+ /* Init registers for pure LB TC */
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn,
+ ext_voq, PBF_CMDQ_PURE_LB_LINES);
}
}
@@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init(
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- u32 temp;
-
if (!port_params[port_id].active)
continue;
@@ -280,13 +383,14 @@ static void qed_btb_blocks_rt_init(
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
- /* find blocks per physical TC */
+ /* Find blocks per physical TC. Use a scaling factor to avoid
+ * floating-point arithmetic.
+ */
num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) == 1)
num_tcs_in_port++;
- }
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
(num_tcs_in_port * BTB_PURE_LB_FACTOR +
@@ -299,47 +403,55 @@ static void qed_btb_blocks_rt_init(
/* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) != 1)
- continue;
-
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- phys_blocks);
+ tc) & 0x1) == 1) {
+ ext_voq =
+ qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET
+ (ext_voq), phys_blocks);
+ }
}
/* Init pure LB TC */
- temp = LB_VOQ(port_id);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
pure_lb_blocks);
}
}
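
A minimal sketch of the fixed-point split that the scaling-factor comment above describes; the helper name and parameters are hypothetical, not part of the patch:

	/* Scale the dividend by 'factor' so the pure-LB share can be
	 * computed with integer-only arithmetic, mirroring the
	 * BTB_PURE_LB_FACTOR expression in qed_btb_blocks_rt_init().
	 */
	static u32 btb_pure_lb_share(u32 usable_blocks, u32 num_tcs_in_port,
				     u32 factor, u32 lb_ratio)
	{
		return (usable_blocks * factor) /
		       (num_tcs_in_port * factor + lb_ratio);
	}
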
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params,
- u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
{
- struct init_qm_vport_params *vport_params = p_params->vport_params;
- u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
- u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
- u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
- QM_PF_QUEUE_GROUP_SIZE;
- u16 i, pq_id, pq_group;
-
- /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
- u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
- u32 mem_addr_4kb = base_mem_addr_4kb;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+ first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
/* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(p_params->pf_id));
+
/* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_pf_cids));
@@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init(
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
- u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
- p_params->max_phys_tcs_per_port);
- bool is_vf_pq = (i >= p_params->num_pf_pqs);
- struct qm_rf_pq_map tx_pq_map;
-
- bool rl_valid = p_params->pq_params[i].rl_valid &&
- (p_params->pq_params[i].vport_id <
- MAX_QM_GLOBAL_RLS);
+ u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
+ u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ struct qm_rf_pq_map_e4 tx_pq_map;
+ bool is_vf_pq, rl_valid;
+ u16 *p_first_tx_pq_id;
+
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ p_params->port_id,
+ tc_id,
+ p_params->max_phys_tcs_per_port);
+ is_vf_pq = (i >= p_params->num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid &&
+ pq_params[i].vport_id < max_qm_global_rls;
/* Update first Tx PQ of VPORT/TC */
- u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
- p_params->start_vport;
- u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
- u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+ vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+ p_first_tx_pq_id =
+ &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+ if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+ u32 map_val =
+ (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
- if (first_tx_pq_id == QM_INVALID_PQ_ID) {
/* Create new VP PQ */
- pq_ids[p_params->pq_params[i].tc_id] = pq_id;
- first_tx_pq_id = pq_id;
+ *p_first_tx_pq_id = pq_id;
/* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET +
- first_tx_pq_id,
- (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (p_params->pf_id <<
- QM_WFQ_VP_PQ_PF_SHIFT));
+ *p_first_tx_pq_id,
+ map_val);
}
- if (p_params->pq_params[i].rl_valid && !rl_valid)
+ /* Check RL ID */
+ if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+ max_qm_global_rls)
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
- /* Fill PQ map entry */
- memset(&tx_pq_map, 0, sizeof(tx_pq_map));
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg,
- QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- rl_valid ?
- p_params->pq_params[i].vport_id : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
- p_params->pq_params[i].wrr_group);
- /* Write PQ map entry to CAM */
- STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
- *((u32 *)&tx_pq_map));
- /* Set base address */
+ "Invalid VPORT ID for rate limiter configuration\n");
+
+ /* Prepare PQ map entry */
+ QM_INIT_TX_PQ_MAP(p_hwfn,
+ tx_pq_map,
+ E4,
+ pq_id,
+ rl_valid ? 1 : 0,
+ *p_first_tx_pq_id,
+ rl_valid ? pq_params[i].vport_id : 0,
+ ext_voq, pq_params[i].wrr_group);
+
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Clear PQ pointer table entry (64 bit) */
+ if (p_params->is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
+ /* Write PQ info to RAM */
+ if (WRITE_PQ_INFO_TO_RAM != 0) {
+ u32 pq_info = 0;
+
+ pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+ p_params->pf_id,
+ tc_id,
+ p_params->port_id,
+ rl_valid ? 1 : 0,
+ rl_valid ?
+ pq_params[i].vport_id : 0);
+ qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+ }
+
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
tx_pq_vf_mask[pq_id /
@@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init(
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
- u16 i, pq_id, pq_group;
+ u16 i, j, pq_id, pq_group;
- /* a single other PQ group is used in each PF,
- * where PQ group i is used in PF i.
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
*/
pq_group = pf_id;
pq_size = num_pf_cids + num_tids;
@@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
+
/* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
mem_addr_4kb += pq_mem_4kb;
}
}
@@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
- u32 crd_reg_offset;
- u32 inc_val;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 inc_val, crd_reg_offset;
+ u8 ext_voq;
u16 i;
- if (p_params->pf_id < MAX_NUM_PFS_BB)
- crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
- else
- crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
- crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
-
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
@@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
}
for (i = 0; i < num_tx_pqs; i++) {
- u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
- p_params->max_phys_tcs_per_port);
-
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ p_params->port_id,
+ pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ crd_reg_offset =
+ (p_params->pf_id < MAX_NUM_PFS_BB ?
+ QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn,
- crd_reg_offset + voq * MAX_NUM_PFS_BB,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
+
return 0;
}
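
A worked example of the credit-register offset arithmetic above, assuming MAX_NUM_PFS_BB is 8 as on BB devices (illustrative only):

	/* PF 10, ext_voq 3: pf_id >= MAX_NUM_PFS_BB selects the MSB bank, so
	 *   crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET + 3 * 8 + (10 % 8)
	 * i.e. the registers are laid out VOQ-major, PF-minor within each bank.
	 */
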
@@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
- QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
@@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
+ u16 vport_pq_id;
u32 inc_val;
u8 tc, i;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
-
if (!vport_params[i].vport_wfq)
continue;
@@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return -1;
}
- /* each VPORT can have several VPORT PQ IDs for
- * different TCs
- */
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
-
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPCRD_RT_OFFSET +
vport_pq_id,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPWEIGHT_RT_OFFSET +
vport_pq_id, inc_val);
@@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
+ u32 link_speed,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
+ u32 inc_val;
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
@@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+ vport_params[i].vport_rl :
+ link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn,
"Invalid VPORT rate-limit configuration\n");
return -1;
}
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ QM_VP_RL_UPPER_BOUND(link_speed) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
@@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
{
u32 reg_val, i;
- for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
i++) {
udelay(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
@@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
}
/******************** INTERFACE IMPLEMENTATION *********************/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
@@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id,
QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params)
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
{
- /* init AFullOprtnstcCrdMask */
+ /* Init AFullOprtnstcCrdMask */
u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
@@ -664,18 +816,31 @@ int qed_qm_common_rt_init(
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+ /* Enable/disable PF RL */
qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+ /* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+ /* Enable/disable VPORT RL */
qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+
+ /* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+ /* Init PBF CMDQ line credit */
qed_cmdq_lines_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+
+ /* Init BTB blocks in PBF */
qed_btb_blocks_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+
return 0;
}
@@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
/* Map Other PQs (if any) */
- qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
- p_params->num_pf_cids, p_params->num_tids, 0);
+ qed_other_pq_map_rt_init(p_hwfn,
+ p_params->pf_id,
+ p_params->is_pf_loading, p_params->num_pf_cids,
+ p_params->num_tids, 0);
/* Map Tx PQs */
qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+ /* Init PF WFQ */
if (p_params->pf_wfq)
if (qed_pf_wfq_rt_init(p_hwfn, p_params))
return -1;
+ /* Init PF RL */
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
+ /* Set VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
+ /* Set VPORT RL */
if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, vport_params))
+ p_params->num_vports, p_params->link_speed,
+ vport_params))
return -1;
return 0;
@@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
}
qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
return 0;
}
@@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
- qed_wr(p_hwfn, p_ptt,
- QM_REG_RLPFCRD + pf_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
return 0;
@@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
- qed_wr(p_hwfn, p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
- inc_val);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
}
return 0;
}
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+ struct qed_ptt *p_ptt,
+ u8 vport_id, u32 vport_rl, u32 link_speed)
{
- u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- if (vport_id >= MAX_QM_GLOBAL_RLS) {
+ if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
return -1;
}
- qed_wr(p_hwfn, p_ptt,
- QM_REG_RLGLBLCRD + vport_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
return 0;
@@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
- u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
/* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+ /* Go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
/* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
- pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+ pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));
/* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
- PAUSE_MASK, pq_mask);
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD, PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD,
GROUP_ID,
pq_id / QM_STOP_PQ_MASK_WIDTH);
if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
@@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
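
The PQ-mask accumulation above follows a chunk-and-flush pattern; a standalone sketch using the kernel's BIT() macro (helper names are hypothetical):

	/* Accumulate one bit per PQ into QM_STOP_PQ_MASK_WIDTH-sized groups
	 * and flush a group when it fills up or the last PQ is reached.
	 */
	static void for_each_pq_mask(u16 start_pq, u16 num_pqs, u16 width,
				     void (*flush)(u32 group_id, u32 mask))
	{
		u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

		for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
			pq_mask |= BIT(pq_id % width);
			if (pq_id == last_pq ||
			    pq_id % width == width - 1) {
				flush(pq_id / width, pq_mask);
				pq_mask = 0;
			}
		}
	}
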
-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
- if (enable)
- set_bit(bit, var);
- else
- clear_bit(bit, var);
-}
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+ do { \
+ typeof(var) *__p_var = &(var); \
+ typeof(offset) __offset = offset; \
+ *__p_var = (*__p_var & ~BIT(__offset)) | \
+ ((enable) ? BIT(__offset) : 0); \
+ } while (0)
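
The macro replaces the old set_bit()/clear_bit() helper because reg_val is now a plain u32 rather than an unsigned long bitmap; its effect is equivalent to this sketch (not part of the patch):

	/* Clear bit 'offset' in 'var', then set it again only if 'enable'. */
	static inline u32 tunnel_type_bit(u32 var, u8 offset, bool enable)
	{
		return (var & ~BIT(offset)) | (enable ? BIT(offset) : 0);
	}
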
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
{
+ /* Update PRS register */
qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+ /* Update PBF register */
qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
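
Hypothetical usage, programming the IANA-assigned VXLAN UDP port into all three blocks in one call:

	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);	/* 4789 = IANA VXLAN port */
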
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
- vxlan_enable ? 1 : 0);
+ /* Update DORQ register */
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
- eth_gre_enable ? 1 : 0);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
- ip_gre_enable ? 1 : 0);
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
{
+ /* Update PRS register */
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+ /* Update PBF register */
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
@@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
- /* EDPM with geneve tunnel not supported in BB_B0 */
+ /* EDPM with geneve tunnel not supported in BB */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
eth_geneve_enable ? 1 : 0);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
ip_geneve_enable ? 1 : 0);
}
@@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
- u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
- pf_id * RAM_LINE_SIZE;
-
- /*stop using gft logic */
+ /* Disable GFT search for the PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+ /* Clean RAM & CAM for next GFT session */
+
+ /* Zero CAM line */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn, p_ptt, hw_addr, 0);
- qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+ /* Zero RAM line */
+ qed_wr(p_hwfn,
+ p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+ 0);
}
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 pf_id, bool tcp, bool udp,
- bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- union gft_cam_line_union camline;
- struct gft_ram_line ramline;
u32 rfs_cm_hdr_event_id;
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
- "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+ "gft_config: must accept at least on of - ipv4 or ipv6'\n");
if (!tcp && !udp)
DP_NOTICE(p_hwfn,
- "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+ "gft_config: must accept at least on of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
- rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
- /* Configure Registers for RFS mode */
- qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+ /* Do not load the context, only the CID, in PRS on a match. */
qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
- camline.cam_line_mapped.camline = 0;
- /* Cam line is now valid!! */
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_VALID, 1);
+ /* Do not use the tenant ID exists bit for GFT search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
- /* filters are per PF!! */
- SET_FIELD(camline.cam_line_mapped.camline,
+ /* Set CAM */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* Filters are per PF!! */
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_PF_ID_MASK,
GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
if (!(tcp && udp)) {
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_TCP_PROTOCOL);
else
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
if (!(ipv4 && ipv6)) {
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV4);
else
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
/* Write characteristics to cam */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camline.cam_line_mapped.camline);
- camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
- PRS_REG_GFT_CAM +
- CAM_LINE_SIZE * pf_id);
+ cam_line);
+ cam_line =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ramline.lo = 0;
- ramline.hi = 0;
- SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
-
- /* Each iteration write to reg */
- qed_wr(p_hwfn, p_ptt,
+ ram_line_lo = 0;
+ ram_line_hi = 0;
+
+ if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ }
+
+ qed_wr(p_hwfn,
+ p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ramline.lo);
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
- ramline.hi);
+ ram_line_lo);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+ ram_line_hi);
/* Set default profile so that no filter match will happen */
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
- ramline.lo);
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
- ramline.hi);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+ /* Enable GFT search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
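
A usage sketch of the reworked API, enabling 4-tuple classification for TCP-over-IPv4 only; the argument values are placeholders:

	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
		       true,	/* tcp */
		       false,	/* udp */
		       true,	/* ipv4 */
		       false,	/* ipv6 */
		       GFT_PROFILE_TYPE_4_TUPLE);
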
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ static u8 crc8_table_valid; /* automatically initialized to 0 */
+ u32 validation_string = 0;
+ u32 data_to_crc;
+
+ if (!crc8_table_valid) {
+ crc8_populate_msb(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
+
+ /* The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian and calculate CRC8 */
+ data_to_crc = be32_to_cpu(validation_string);
+
+ crc = crc8(cdu_crc8_table,
+ (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+ /* The validation byte [7:0] is composed:
+ * for type A validation
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
+ *
+ * for type B validation
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
+ */
+ validation_byte |=
+ ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
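
A worked example of the string-to-compress packing above (values chosen purely for illustration):

	/* cid = 0x12345678, region = 3, conn_type = 1:
	 *   cid & 0xFFF00000      = 0x12300000   -> CID[31:20]
	 *   (cid & 0xFFF) << 8    = 0x00067800   -> CID[11:0]
	 *   (region & 0xF) << 4   = 0x00000030
	 *   conn_type & 0xF       = 0x00000001
	 * validation_string       = 0x12367831
	 */
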
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
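
Hypothetical usage: recycle a task context without losing its CDU validation byte (the variable names are placeholders):

	qed_memset_task_ctx(p_task_ctx, task_ctx_size, tid_type);
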
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index e3f368882f46..3bb76da6baa2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
}
/* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
{
- DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+ int rc;
+
+ switch (p_cmd->callback_id) {
+ case DMAE_READY_CB:
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+ p_cmd->callback_id);
+ return -EINVAL;
+ }
+
+ return rc;
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
break;
case INIT_OP_CALLBACK:
- qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 719cdbfe1695..d3eabcf9c86c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -59,10 +59,10 @@ struct qed_pi_info {
};
struct qed_sb_sp_info {
- struct qed_sb_info sb_info;
+ struct qed_sb_info sb_info;
/* per protocol index data */
- struct qed_pi_info pi_info_arr[PIS_PER_SB];
+ struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_LENGTH_SHIFT (4)
#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
ATTENTION_PARITY)
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
if (IS_VF(p_hwfn->cdev))
return;
- sb_offset = igu_sb_id * PIS_PER_SB;
+ sb_offset = igu_sb_id * PIS_PER_SB_E4;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 5199634ed630..54b4ee0acfd7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 813c77cc857f..c0d4a54a5edb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -62,22 +62,6 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code,
- u16 echo, union event_ring_data *data, u8 fw_return_code)
-{
- if (p_hwfn->p_iscsi_info->event_cb) {
- struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
- return p_iscsi->event_cb(p_iscsi->event_context,
- fw_event_code, data);
- } else {
- DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
- return -EINVAL;
- }
-}
-
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
@@ -105,7 +89,7 @@ struct qed_iscsi_conn {
u8 local_mac[6];
u8 remote_mac[6];
u16 vlan_id;
- u8 tcp_flags;
+ u16 tcp_flags;
u8 ip_version;
u32 remote_ip[4];
u32 local_ip[4];
@@ -122,7 +106,6 @@ struct qed_iscsi_conn {
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
- u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
@@ -144,7 +127,6 @@ struct qed_iscsi_conn {
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
- u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
@@ -162,6 +144,22 @@ struct qed_iscsi_conn {
};
static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
+}
+
+static int
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr,
@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
- p_init->ooo_enable = p_params->ooo_enable;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
+
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
val = p_params->tx_sws_timer;
p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
- p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+ p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
int rc = 0;
u32 dval;
u16 wval;
- u8 i;
u16 *p;
+ u8 i;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
- p_tcp->flags = p_conn->tcp_flags;
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
dval = p_conn->remote_ip[i];
@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
- p_tcp2->flags = p_conn->tcp_flags;
+ p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp2->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+ p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+ p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
}
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
}
}
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
{
if (!p_conn->queue_cnts_virt_addr)
goto nomem;
@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
if (!rc)
- rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+ rc = qed_iscsi_setup_connection(p_conn);
if (rc) {
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->ss_thresh = conn_info->ss_thresh;
con->srtt = conn_info->srtt;
con->rtt_var = conn_info->rtt_var;
- con->ts_time = conn_info->ts_time;
con->ts_recent = conn_info->ts_recent;
con->ts_recent_age = conn_info->ts_recent_age;
con->total_rt = conn_info->total_rt;
@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->mss = conn_info->mss;
con->snd_wnd_scale = conn_info->snd_wnd_scale;
con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
- con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
con->da_timeout_value = conn_info->da_timeout_value;
con->ack_frequency = conn_info->ack_frequency;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 409041eab189..ca4a81dc1ace 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
#define QED_IWARP_INVALID_TCP_CID 0xffffffff
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
#define TIMESTAMP_HEADER_SIZE (12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
#define QED_IWARP_TS_EN BIT(0)
#define QED_IWARP_DA_EN BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED (1)
#define QED_IWARP_PARAM_P2P (1)
+#define QED_IWARP_DEF_MAX_RT_TIME (0)
+#define QED_IWARP_DEF_CWND_FACTOR (4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
+
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, u16 echo,
union event_ring_data *data,
@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
- struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_init_func_ramrod_data *p_ramrod)
{
- p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
- p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ p_ramrod->iwarp.ll2_ooo_q_index =
+ RESC_START(p_hwfn, QED_LL2_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+ p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
+
+ return;
}
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
tcp->ttl = 0x40;
tcp->tos_or_tc = 0;
+ tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+ tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
+ tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+ tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+ tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+
tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
tcp->connect_mode = ep->connect_mode;
@@ -807,6 +826,7 @@ static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+ struct qed_iwarp_info *iwarp_info;
struct qed_sp_init_data init_data;
dma_addr_t async_output_phys;
struct qed_spq_entry *p_ent;
@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
p_mpa_ramrod->common.reject = 1;
}
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
p_mpa_ramrod->mode = ep->mpa_rev;
SET_FIELD(p_mpa_ramrod->rtr_pref,
IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+ iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index c1ecd743305f..b8f612d00241 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -95,6 +95,7 @@ struct qed_iwarp_info {
spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */
u32 rcv_wnd_scale;
+ u16 rcv_wnd_size;
u16 max_mtu;
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params);
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
- struct iwarp_init_func_params *p_ramrod);
+ struct iwarp_init_func_ramrod_data *p_ramrod);
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 085338990f49..893ef08a4b39 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid;
int rc;
- p_cid = vmalloc(sizeof(*p_cid));
+ p_cid = vzalloc(sizeof(*p_cid));
if (!p_cid)
return NULL;
- memset(p_cid, 0, sizeof(*p_cid));
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
@@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
_qed_get_vport_stats(cdev, cdev->reset_stats);
}
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- struct qed_arfs_config_params *p_cfg_params)
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
{
- if (p_cfg_params->arfs_enable) {
- qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_cfg_params->tcp, p_cfg_params->udp,
- p_cfg_params->ipv4, p_cfg_params->ipv6);
- DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+ return GFT_PROFILE_TYPE_4_TUPLE;
+ if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+ return GFT_PROFILE_TYPE_IP_DST_PORT;
+ return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+ qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6,
+ qed_arfs_mode_to_hsi(p_cfg_params->mode));
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
p_cfg_params->tcp ? "Enable" : "Disable",
p_cfg_params->udp ? "Enable" : "Disable",
p_cfg_params->ipv4 ? "Enable" : "Disable",
- p_cfg_params->ipv6 ? "Enable" : "Disable");
+ p_cfg_params->ipv6 ? "Enable" : "Disable",
+ (u32)p_cfg_params->mode);
} else {
- qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+ qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
-
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
- p_cfg_params->arfs_enable ? "Enable" : "Disable");
}
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
- dma_addr_t p_addr, u16 length, u16 qid,
- u8 vport_id, bool b_is_add)
+ struct qed_ntuple_filter_params *p_params)
{
struct rx_update_gft_filter_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 abs_vport_id = 0;
int rc = -EINVAL;
- rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
return rc;
- rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- if (rc)
- return rc;
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+ if (rc)
+ return rc;
+ }
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
p_ramrod = &p_ent->ramrod.rx_update_gft;
- DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
- p_ramrod->pkt_hdr_length = cpu_to_le16(length);
- p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->filter_type = RFS_FILTER_TYPE;
- p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+ p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+ }
+
+ p_ramrod->flow_id_valid = 0;
+ p_ramrod->flow_id = 0;
+
+ p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+ p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
abs_vport_id, abs_rx_q_id,
- b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+ p_params->b_is_add ? "Adding" : "Removing",
+ (u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
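
A sketch of the new calling convention, steering a filter to RSS instead of a fixed queue; the field values are placeholders, and the field names follow the qed_ntuple_filter_params usage visible above (NULL for p_cb means the SPQ callback mode is not used):

	struct qed_ntuple_filter_params params = {
		.addr = hdr_dma_addr,		/* DMA address of packet header */
		.length = hdr_len,		/* header length in bytes */
		.qid = QED_RFS_NTUPLE_QID_RSS,	/* no fixed queue: use RSS */
		.vport_id = 0,
		.b_is_add = true,
	};

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, NULL, &params);
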
@@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
}
}
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+ enum qed_filter_config_mode mode)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_arfs_config_params arfs_config_params;
@@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
arfs_config_params.udp = true;
arfs_config_params.ipv4 = true;
arfs_config_params.ipv6 = true;
- arfs_config_params.arfs_enable = en_searcher;
-
+ arfs_config_params.mode = mode;
qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
&arfs_config_params);
return 0;
@@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
- void *cookie, union event_ring_data *data,
- u8 fw_return_code)
+ void *cookie,
+ union event_ring_data *data, u8 fw_return_code)
{
struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
void *dev = p_hwfn->cdev->ops_cookie;
@@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
op->arfs_filter_op(dev, cookie, fw_return_code);
}
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
- dma_addr_t mapping, u16 length,
- u16 vport_id, u16 rx_queue_id,
- bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+ void *cookie,
+ struct qed_ntuple_filter_params *params)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_spq_comp_cb cb;
@@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
cb.function = qed_arfs_sp_response_handler;
cb.cookie = cookie;
- rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
- &cb, mapping, length, rx_queue_id,
- vport_id, add_filter);
+ if (params->b_is_vf) {
+ if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+ false)) {
+ DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+ params->vf_id);
+ return rc;
+ }
+
+ params->vport_id = params->vf_id + 1;
+ params->qid = QED_RFS_NTUPLE_QID_RSS;
+ }
+
+ rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
if (rc)
DP_NOTICE(p_hwfn,
"Failed to issue a-RFS filter configuration\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index cc1f248551c9..c4030e949cce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
bool udp;
bool ipv4;
bool ipv6;
- bool arfs_enable;
+ enum qed_filter_config_mode mode;
};
struct qed_sp_vport_update_params {
@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
+/**
+ * @brief qed_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. At least one of tcp or udp, and at least
+ * one of ipv4 or ipv6, must be true for RFS mode to be enabled.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params - aRFS mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_cb - Used for QED_SPQ_MODE_CB, where the client initializes
+ * it with a cookie and a callback function address; when not
+ * using this mode, the client must pass NULL.
+ * @param p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+ struct qed_spq_comp_cb *p_cb,
+ struct qed_ntuple_filter_params *p_params);
+
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define QED_QUEUE_CID_SELF (0xff)
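[Editor's note] As the kernel-doc above says, p_cb is only meaningful in QED_SPQ_MODE_CB. A hedged sketch of that mode, mirroring the cb setup in qed_ntuple_arfs_filter_config() earlier in this patch (my_completion_handler and my_cookie are illustrative stand-ins):

	struct qed_spq_comp_cb cb;

	cb.function = my_completion_handler;	/* invoked with the FW return code */
	cb.cookie = my_cookie;			/* handed back to the callback verbatim */

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, p_params);
	/* pass p_cb = NULL when not using QED_SPQ_MODE_CB */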
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 047f556ca62e..c4f14fdc4e77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+ data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+ data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+ p_ramrod->inner_vlan_stripping_en =
+ p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
- p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
- CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+ switch (data->input.tx_dest) {
+ case QED_LL2_TX_DEST_NW:
+ p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+ break;
+ case QED_LL2_TX_DEST_LB:
+ p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+ break;
+ case QED_LL2_TX_DEST_DROP:
+ p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (data->input.conn_type == QED_LL2_TYPE_OOO ||
data->input.secondary_queue)
p_ll2_info->main_func_queue = false;
@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
goto release_terminate;
}
- if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
- cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(cdev, params);
if (rc) {
@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
eth_zero_addr(cdev->ll2_mac_address);
- if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
- cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
qed_ll2_stop_ooo(cdev);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
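[Editor's note] The tx_dest conversion in qed_ll2_acquire_connection() is now an exhaustive switch, so an unrecognized destination fails the acquire with -EINVAL instead of being silently mapped to loopback, and the new QED_LL2_TX_DEST_DROP value is handled. A caller-side sketch (the rest of the acquire plumbing is assumed):

	struct qed_ll2_acquire_data data;

	memset(&data, 0, sizeof(data));
	data.input.tx_dest = QED_LL2_TX_DEST_DROP;	/* new: FW discards TX packets */
	/* ... fill the remaining input fields ... */

	rc = qed_ll2_acquire_connection(cxt, &data);
	if (rc)
		return rc;	/* -EINVAL for an unknown tx_dest */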
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 8b99c7d26f34..6f46cb11f349 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MSG_CODE_NVM_READ_NVRAM,
addr + offset +
(bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_SHIFT),
+ DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
(u32 *)(p_buf + offset));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index b7abb8205d3a..5d040b873137 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -360,13 +360,13 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
@@ -570,7 +570,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
qed_iwarp_init_fw_ramrod(p_hwfn,
- &p_ent->ramrod.iwarp_init_func.iwarp);
+ &p_ent->ramrod.iwarp_init_func);
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
} else {
p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 0cdb4337b3a0..f7122059b6b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -124,6 +124,8 @@
0x1f0434UL
#define PRS_REG_SEARCH_TAG1 \
0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+ 0x1f044cUL
#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
0x1f0a0cUL
#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
@@ -200,7 +202,13 @@
0x2e8800UL
#define CCFC_REG_STRONG_ENABLE_VF \
0x2e070cUL
-#define CDU_REG_CID_ADDR_PARAMS \
+#define CDU_REG_CCFC_CTX_VALID0 \
+ 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+ 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+ 0x580408UL
+#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
@@ -564,7 +572,7 @@
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
@@ -580,11 +588,11 @@
#define PRS_REG_NGE_PORT 0x1f086cUL
#define NIG_REG_NGE_PORT 0x508b38UL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
-#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
@@ -595,15 +603,15 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-#define PGLCS_REG_DBG_SELECT_K2 \
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x001d18UL
-#define PGLCS_REG_DBG_SHIFT_K2 \
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID_K2 \
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
0x001d24UL
#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
0x008070UL
@@ -615,7 +623,7 @@
0x009050UL
#define MISCS_REG_RESET_PL_HV \
0x009060UL
-#define MISCS_REG_RESET_PL_HV_2_K2 \
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
0x009150UL
#define DMAE_REG_DBG_SELECT \
0x00c510UL
@@ -647,15 +655,15 @@
0x0500b0UL
#define GRC_REG_DBG_FORCE_FRAME \
0x0500b4UL
-#define UMAC_REG_DBG_SELECT_K2 \
+#define UMAC_REG_DBG_SELECT_K2_E5 \
0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
0x051098UL
-#define UMAC_REG_DBG_SHIFT_K2 \
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID_K2 \
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME_K2 \
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
0x0510a4UL
#define MCP2_REG_DBG_SELECT \
0x052400UL
@@ -717,15 +725,15 @@
0x1f0ba0UL
#define PRS_REG_DBG_FORCE_FRAME \
0x1f0ba4UL
-#define CNIG_REG_DBG_SELECT_K2 \
+#define CNIG_REG_DBG_SELECT_K2_E5 \
0x218254UL
-#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
0x218258UL
-#define CNIG_REG_DBG_SHIFT_K2 \
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
0x21825cUL
-#define CNIG_REG_DBG_FORCE_VALID_K2 \
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
0x218260UL
-#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
0x218264UL
#define PRM_REG_DBG_SELECT \
0x2306a8UL
@@ -997,35 +1005,35 @@
0x580710UL
#define CDU_REG_DBG_FORCE_FRAME \
0x580714UL
-#define WOL_REG_DBG_SELECT_K2 \
+#define WOL_REG_DBG_SELECT_K2_E5 \
0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE_K2 \
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
0x600144UL
-#define WOL_REG_DBG_SHIFT_K2 \
+#define WOL_REG_DBG_SHIFT_K2_E5 \
0x600148UL
-#define WOL_REG_DBG_FORCE_VALID_K2 \
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME_K2 \
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
0x600150UL
-#define BMBN_REG_DBG_SELECT_K2 \
+#define BMBN_REG_DBG_SELECT_K2_E5 \
0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
0x610144UL
-#define BMBN_REG_DBG_SHIFT_K2 \
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID_K2 \
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME_K2 \
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
0x610150UL
-#define NWM_REG_DBG_SELECT_K2 \
+#define NWM_REG_DBG_SELECT_K2_E5 \
0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE_K2 \
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
0x8000f0UL
-#define NWM_REG_DBG_SHIFT_K2 \
+#define NWM_REG_DBG_SHIFT_K2_E5 \
0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID_K2 \
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME_K2\
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
0x8000fcUL
#define PBF_REG_DBG_SELECT \
0xd80060UL
@@ -1247,36 +1255,76 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
-#define NWS_REG_DBG_SELECT_K2 \
+#define NWS_REG_DBG_SELECT_K2_E5 \
0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE_K2 \
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x70012cUL
-#define NWS_REG_DBG_SHIFT_K2 \
+#define NWS_REG_DBG_SHIFT_K2_E5 \
0x700130UL
-#define NWS_REG_DBG_FORCE_VALID_K2 \
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME_K2 \
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
0x700138UL
-#define MS_REG_DBG_SELECT_K2 \
+#define MS_REG_DBG_SELECT_K2_E5 \
0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE_K2 \
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x6a022cUL
-#define MS_REG_DBG_SHIFT_K2 \
+#define MS_REG_DBG_SHIFT_K2_E5 \
0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID_K2 \
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME_K2 \
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT_K2 \
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+ 0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+ 0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+ 0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+ 0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5 \
+ 0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+ 0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5 \
+ 0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+ 0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x322050UL
#define MISC_REG_RESET_PL_UA \
0x008050UL
#define MISC_REG_RESET_PL_HV \
@@ -1415,7 +1463,7 @@
0x1940000UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
-#define SEM_FAST_REG_INT_RAM_SIZE \
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
@@ -1433,6 +1481,8 @@
0x340800UL
#define BRB_REG_BIG_RAM_DATA \
0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+ 64
#define SEM_FAST_REG_STALL_0_BB_K2 \
0x000488UL
#define SEM_FAST_REG_STALLED \
@@ -1451,7 +1501,7 @@
0x238c30UL
#define MISCS_REG_BLOCK_256B_EN \
0x009074UL
-#define MCP_REG_SCRATCH_SIZE \
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
57344
#define MCP_REG_CPU_REG_FILE \
0xe05200UL
@@ -1485,35 +1535,35 @@
0x008c14UL
#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
0x0006c4UL
-#define MS_REG_MS_CMU_K2 \
+#define MS_REG_MS_CMU_K2_E5 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
0x000214UL
-#define PHY_PCIE_REG_PHY0_K2 \
+#define PHY_PCIE_REG_PHY0_K2_E5 \
0x620000UL
-#define PHY_PCIE_REG_PHY1_K2 \
+#define PHY_PCIE_REG_PHY1_K2_E5 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a1d33f35aad3..5e927b6cac22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
p_ramrod->mf_mode = MF_NPAR;
}
- p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ p_ramrod->outer_tag_config.outer_tag.tci =
+ cpu_to_le16(p_hwfn->hw_info.ovlan);
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index, p_ramrod->outer_tag);
+ "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index cd9a0297ebc5..1673fc90027f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -213,7 +213,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- struct core_conn_context *p_cxt;
+ struct e4_core_conn_context *p_cxt;
struct qed_cxt_info cxt_info;
u16 physical_q;
int rc;
@@ -231,11 +231,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 3f40b1de7957..5acb91b3564c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id,
- bool b_enabled_only, bool b_non_malicious)
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
- pfdev_info->indices_per_sb = PIS_PER_SB;
+ pfdev_info->indices_per_sb = PIS_PER_SB_E4;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
int i, cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS; i++) {
+ for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
u32 prod;
cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS; i++) {
+ for (; i < MAX_NUM_VOQS_E4; i++) {
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
break;
}
- if (i == MAX_NUM_VOQS)
+ if (i == MAX_NUM_VOQS_E4)
break;
msleep(20);
@@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
+ struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
int rc;
@@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+ p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+ return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+ p_link->speed);
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 3955929ba892..9a8fd79611f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -274,6 +274,23 @@ enum qed_iov_wq_flag {
#ifdef CONFIG_QED_SRIOV
/**
+ * @brief Check whether the given VF ID is valid
+ * w.r.t. the @b_enabled_only value:
+ * if b_enabled_only = true - only an enabled VF ID is valid;
+ * else - any VF ID less than max_vfs is valid.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff the VF must also not be marked malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious);
+
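[Editor's note] With the declaration exported here (and a false-returning stub below for !CONFIG_QED_SRIOV builds), callers can validate VF IDs unconditionally. The call pattern, as used by the ntuple filter path earlier in this patch:

	if (!qed_iov_is_valid_vfid(p_hwfn, rel_vf_id,
				   false,   /* b_enabled_only: disabled VFs OK */
				   false))  /* b_non_malicious: skip malice check */
		return -EINVAL;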
+/**
* @brief - Given a VF index, return index of next [including that] active VF.
*
* @param p_hwfn
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+ return false;
+}
+
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index a3a70ade411f..9935978c5542 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
+#include <net/xdp.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
@@ -52,9 +53,9 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 10
-#define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 21
+#define QEDE_MINOR_VERSION 33
+#define QEDE_REVISION_VERSION 0
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -345,6 +346,7 @@ struct qede_rx_queue {
u64 xdp_no_pass;
void *handle;
+ struct xdp_rxq_info xdp_rxq;
};
union db_prod {
@@ -494,6 +496,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index dae741270022..4ca3847fffd4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Configuring MTU size of %d\n", new_mtu);
+ if (new_mtu > PAGE_SIZE)
+ ndev->features &= ~NETIF_F_GRO_HW;
+
/* Set the mtu field and re-start the interface if needed */
args.u.mtu = new_mtu;
args.func = &qede_update_mtu;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c1a0708a7d7c..6687e04d1558 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
u16 rxq_id, bool add_fltr)
{
const struct qed_eth_ops *op = edev->ops;
+ struct qed_ntuple_filter_params params;
if (n->used)
return;
+ memset(&params, 0, sizeof(params));
+
+ params.addr = n->mapping;
+ params.length = n->buf_len;
+ params.qid = rxq_id;
+ params.b_is_add = add_fltr;
+
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
"%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
add_fltr ? "Adding" : "Deleting",
@@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->used = true;
n->filter_op = add_fltr;
- op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
- rxq_id, add_fltr);
+ op->ntuple_filter_config(edev->cdev, n, &params);
}
static void
@@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count++;
if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
- edev->ops->configure_arfs_searcher(edev->cdev, true);
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
edev->arfs->enable = true;
}
@@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count--;
if (!edev->arfs->filter_count && edev->arfs->enable) {
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
- edev->ops->configure_arfs_searcher(edev->cdev, false);
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
}
@@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
if (!edev->arfs->filter_count) {
if (edev->arfs->enable) {
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
- edev->ops->configure_arfs_searcher(edev->cdev, false);
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
#ifdef CONFIG_RFS_ACCEL
} else {
@@ -895,19 +911,26 @@ static void qede_set_features_reload(struct qede_dev *edev,
edev->ndev->features = args->u.features;
}
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+ !(features & NETIF_F_GRO))
+ features &= ~NETIF_F_GRO_HW;
+
+ return features;
+}
+
int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
bool need_reload = false;
- /* No action needed if hardware GRO is disabled during driver load */
- if (changes & NETIF_F_GRO) {
- if (dev->features & NETIF_F_GRO)
- need_reload = !edev->gro_disable;
- else
- need_reload = edev->gro_disable;
- }
+ if (changes & NETIF_F_GRO_HW)
+ need_reload = true;
if (need_reload) {
struct qede_reload_args args;
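[Editor's note] qede_fix_features() implements the .ndo_fix_features contract: the core calls it with the user-requested feature set and commits whatever mask it returns, so the NETIF_F_GRO_HW constraints (no XDP program, MTU within a page, software GRO still on) are re-evaluated on every feature change rather than only at load time. A standalone sketch of the hook's shape, with my_fix_features as an illustrative name:

	static netdev_features_t my_fix_features(struct net_device *dev,
						 netdev_features_t features)
	{
		/* hardware GRO only makes sense while software GRO is enabled */
		if (!(features & NETIF_F_GRO))
			features &= ~NETIF_F_GRO_HW;

		return features;	/* the core applies the adjusted mask */
	}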
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 48ec4c56cddf..dafc079ab6b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp.data = xdp.data_hard_start + *data_offset;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
+ xdp.rxq = &rxq->xdp_rxq;
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
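[Editor's note] From this release on, every struct xdp_buff handed to a BPF program must carry a pointer to its ring's registered xdp_rxq_info; the xdp.rxq assignment above supplies it. In context, as a sketch (bd, data_offset, len and prog come from the surrounding fastpath code):

	struct xdp_buff xdp;
	u32 act;

	xdp.data_hard_start = page_address(bd->data);
	xdp.data = xdp.data_hard_start + *data_offset;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + *len;
	xdp.rxq = &rxq->xdp_rxq;	/* registered in qede_init_fp() */

	act = bpf_prog_run_xdp(prog, &xdp);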
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 8f9b3eb82137..2db70eabddfe 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_change_mtu = qede_change_mtu,
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_change_mtu = qede_change_mtu,
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->priv_flags |= IFF_UNICAST_FLT;
/* user-changeable features */
- hw_features = NETIF_F_GRO | NETIF_F_SG |
+ hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
@@ -762,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev)
fp = &edev->fp_array[i];
kfree(fp->sb_info);
+ /* Handle the mem-alloc failure case where qede_init_fp
+ * didn't register the xdp_rxq_info yet.
+ * Implicitly only for (fp->type & QEDE_FASTPATH_RX).
+ */
+ if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
kfree(fp->rxq);
kfree(fp->xdp_tx);
kfree(fp->txq);
@@ -1068,10 +1077,6 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
pci_set_drvdata(pdev, NULL);
- /* Release edev's reference to XDP's bpf if such exist */
- if (edev->xdp_prog)
- bpf_prog_put(edev->xdp_prog);
-
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
@@ -1148,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -1232,18 +1237,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
dma_addr_t mapping;
int i;
- /* Don't perform FW aggregations in case of XDP */
- if (edev->xdp_prog)
- edev->gro_disable = 1;
-
if (edev->gro_disable)
return 0;
- if (edev->ndev->mtu > PAGE_SIZE) {
- edev->gro_disable = 1;
- return 0;
- }
-
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
struct sw_rx_data *replace_buf = &tpa_info->buffer;
@@ -1273,6 +1269,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
err:
qede_free_sge_mem(edev, rxq);
edev->gro_disable = 1;
+ edev->ndev->features &= ~NETIF_F_GRO_HW;
return -ENOMEM;
}
@@ -1502,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev)
else
fp->rxq->data_direction = DMA_FROM_DEVICE;
fp->rxq->dev = &edev->pdev->dev;
+
+ /* Driver has no error path from here */
+ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
+ fp->rxq->rxq_id) < 0);
}
if (fp->type & QEDE_FASTPATH_TX) {
@@ -1515,7 +1516,7 @@ static void qede_init_fp(struct qede_dev *edev)
edev->ndev->name, queue_id);
}
- edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
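[Editor's note] The xdp_rxq_info lifecycle introduced in this merge window is register-on-setup, unregister-on-teardown, with xdp_rxq_info_is_reg() guarding partially initialized queues (the alloc-failure case the comment in qede_free_fp_array() describes). The pairing, in outline:

	/* RX queue setup */
	if (xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->rxq_id) < 0)
		goto err;	/* qede instead WARNs, having no error path there */

	/* RX queue teardown */
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);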
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f7080d0ab874..46b0372dd032 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
struct list_head *head = &mbx->cmd_q;
struct qlcnic_cmd_args *cmd = NULL;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
while (!list_empty(head)) {
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
@@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
}
static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
@@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_del(&cmd->list);
mbx->num_cmds--;
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
@@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
init_completion(&cmd->completion);
cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_add_tail(&cmd->list, &mbx->cmd_q);
mbx->num_cmds++;
@@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
*timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
queue_work(mbx->work_q, &mbx->work);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return 0;
}
@@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
spin_unlock_irqrestore(&mbx->aen_lock, flags);
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
if (list_empty(head)) {
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return;
}
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
mbx_ops->encode_cmd(adapter, cmd);
mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
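[Editor's note] All six qlcnic hunks above are one fix: mbx->queue_lock can be taken from softirq context as well as from process context (enqueue/dequeue/flush/worker), so every process-context acquisition must disable bottom halves. Otherwise a softirq that interrupts the lock holder on the same CPU and then takes the same lock spins forever. Schematically:

	/* process context: BHs must stay off while the lock is held */
	spin_lock_bh(&mbx->queue_lock);
	list_add_tail(&cmd->list, &mbx->cmd_q);
	spin_unlock_bh(&mbx->queue_lock);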
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index 5028fb4bec2b..4beedb8faa1e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -114,8 +114,9 @@ struct emac_tpd {
#define TPD_INSTC_SET(tpd, val) BITS_SET((tpd)->word[3], 17, 17, val)
/* High 14 bits of the buffer address, so the 64-bit address is
 * {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L}
+ * Extend TPD_BUFFER_ADDR_H to [31:18], because we never enable timestamping.
*/
-#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 30, val)
+#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 31, val)
/* Format D. Word offset from the 1st byte of this packet to start to calculate
* the custom checksum.
*/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 38c924bdd32e..13235baf4766 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -615,8 +615,11 @@ static int emac_probe(struct platform_device *pdev)
u32 reg;
int ret;
- /* The TPD buffer address is limited to 45 bits. */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(45));
+ /* The TPD buffer address is limited to:
+ * 1. PTP: 45 bits (the driver doesn't support PTP yet).
+ * 2. Non-PTP: 46 bits.
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
if (ret) {
dev_err(&pdev->dev, "could not set DMA mask\n");
return ret;
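[Editor's note] The 46-bit figure follows from the TPD layout in emac-mac.h above: BUFFER_ADDR_L carries address bits [31:0], and TPD_BUFFER_ADDR_H, now word[3] bits [31:18], carries 14 more, so 32 + 14 = 46 addressable bits. Bit 31 is apparently needed only when timestamping is enabled, hence the old [30:18] range and 45-bit limit. A worked check, as a sketch:

	/* 64-bit DMA address split across the TPD:
	 *   addr[31:0]  <- BUFFER_ADDR_L         (32 bits)
	 *   addr[45:32] <- word[3] bits [31:18]  (14 bits, non-PTP)
	 */
	BUILD_BUG_ON(32 + (31 - 18 + 1) != 46);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));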
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index df21e900f874..7e7704daf5f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -143,11 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING |
- RMNET_INGRESS_FORMAT_DEAGGREGATION |
- RMNET_INGRESS_FORMAT_MAP;
- int egress_format = RMNET_EGRESS_FORMAT_MUXING |
- RMNET_EGRESS_FORMAT_MAP;
+ u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -181,13 +177,20 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err2;
- netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n",
- ingress_format, egress_format);
- port->egress_data_format = egress_format;
- port->ingress_data_format = ingress_format;
port->rmnet_mode = mode;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ struct ifla_vlan_flags *flags;
+
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ data_format = flags->flags & flags->mask;
+ }
+
+ netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+ port->data_format = data_format;
+
return 0;
err2:
@@ -317,9 +320,49 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev;
+ struct rmnet_endpoint *ep;
+ struct rmnet_port *port;
+ u16 mux_id;
+
+ real_dev = __dev_get_by_index(dev_net(dev),
+ nla_get_u32(tb[IFLA_LINK]));
+
+ if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+ return -ENODEV;
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ if (data[IFLA_VLAN_ID]) {
+ mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+ ep = rmnet_get_endpoint(port, priv->mux_id);
+
+ hlist_del_init_rcu(&ep->hlnode);
+ hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+ ep->mux_id = mux_id;
+ priv->mux_id = mux_id;
+ }
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ struct ifla_vlan_flags *flags;
+
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ port->data_format = flags->flags & flags->mask;
+ }
+
+ return 0;
+}
+
static size_t rmnet_get_size(const struct net_device *dev)
{
- return nla_total_size(2); /* IFLA_VLAN_ID */
+ return nla_total_size(2) /* IFLA_VLAN_ID */ +
+ nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */
}
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
@@ -331,6 +374,7 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.newlink = rmnet_newlink,
.dellink = rmnet_dellink,
.get_size = rmnet_get_size,
+ .changelink = rmnet_changelink,
};
/* Needs either rcu_read_lock() or rtnl lock */
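[Editor's note] rmnet piggybacks on the VLAN netlink attribute space, so the new single data_format word is programmed through IFLA_VLAN_FLAGS on both newlink and the new changelink path: flags->mask selects which bits the caller is setting and flags->flags supplies their values. With the bit definitions from rmnet_private.h later in this patch, enabling MAPv4 checksum offload in both directions would look like (a userspace-side sketch; netlink attribute assembly elided):

	struct ifla_vlan_flags flags = {
		.flags = RMNET_INGRESS_FORMAT_MAP_CKSUMV4 |
			 RMNET_EGRESS_FORMAT_MAP_CKSUMV4,	/* BIT(2) | BIT(3) */
		.mask  = RMNET_INGRESS_FORMAT_MAP_CKSUMV4 |
			 RMNET_EGRESS_FORMAT_MAP_CKSUMV4,
	};
	/* sent as IFLA_VLAN_FLAGS inside IFLA_INFO_DATA; the driver applies
	 * port->data_format = flags.flags & flags.mask;
	 */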
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index c19259eea99e..00e4634100d3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -32,8 +32,7 @@ struct rmnet_endpoint {
*/
struct rmnet_port {
struct net_device *dev;
- u32 ingress_data_format;
- u32 egress_data_format;
+ u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 08e4afc0ab39..601edec28c5f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
@@ -64,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_endpoint *ep;
+ u16 len, pad;
u8 mux_id;
- u16 len;
if (RMNET_MAP_GET_CD_BIT(skb)) {
- if (port->ingress_data_format
- & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = RMNET_MAP_GET_MUX_ID(skb);
- len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+ pad = RMNET_MAP_GET_PAD(skb);
+ len = RMNET_MAP_GET_LENGTH(skb) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
@@ -89,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
- skb_trim(skb, len);
rmnet_set_skb_proto(skb);
+
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ skb_trim(skb, len);
rmnet_deliver_skb(skb);
return;
@@ -104,8 +112,17 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
{
struct sk_buff *skbn;
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
- while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
+ if (skb->dev->type == ARPHRD_ETHER) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, ETH_HLEN);
+ }
+
+ if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+ while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
__rmnet_map_ingress_handler(skbn, port);
consume_skb(skb);
@@ -124,29 +141,32 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+ additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+ required_headroom += additional_header_len;
+ }
+
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
goto fail;
}
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+ rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
goto fail;
- if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
- if (mux_id == 0xff)
- map_header->mux_id = 0;
- else
- map_header->mux_id = mux_id;
- }
+ map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
- return RMNET_MAP_SUCCESS;
+ return 0;
fail:
kfree_skb(skb);
- return RMNET_MAP_CONSUMED;
+ return -ENOMEM;
}
static void
@@ -178,8 +198,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
- rmnet_map_ingress_handler(skb, port);
+ rmnet_map_ingress_handler(skb, port);
break;
case RMNET_EPMODE_BRIDGE:
rmnet_bridge_handler(skb, port->bridge_ep);
@@ -201,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
struct rmnet_priv *priv;
u8 mux_id;
+ sk_pacing_shift_update(skb->sk, 8);
+
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
@@ -212,19 +233,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
return;
}
- if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
- switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) {
- case RMNET_MAP_CONSUMED:
- return;
-
- case RMNET_MAP_SUCCESS:
- break;
-
- default:
- kfree_skb(skb);
- return;
- }
- }
+ if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+ return;
rmnet_vnd_tx_fixup(skb, orig_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 3af3fe7b5457..6ce31e29136d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -30,15 +30,6 @@ struct rmnet_map_control_command {
};
} __aligned(1);
-enum rmnet_map_results {
- RMNET_MAP_SUCCESS,
- RMNET_MAP_CONSUMED,
- RMNET_MAP_GENERAL_FAILURE,
- RMNET_MAP_NOT_ENABLED,
- RMNET_MAP_FAILED_AGGREGATION,
- RMNET_MAP_FAILED_MUX
-};
-
enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
@@ -56,6 +47,22 @@ struct rmnet_map_header {
u16 pkt_len;
} __aligned(1);
+struct rmnet_map_dl_csum_trailer {
+ u8 reserved1;
+ u8 valid:1;
+ u8 reserved2:7;
+ u16 csum_start_offset;
+ u16 csum_length;
+ __be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+ __be16 csum_start_offset;
+ u16 csum_insert_offset:14;
+ u16 udp_ip4_ind:1;
+ u16 csum_enabled:1;
+} __aligned(1);
+
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -76,10 +83,13 @@ struct rmnet_map_header {
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
-u8 rmnet_map_demultiplex(struct sk_buff *skb);
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 51e604923ac1..6bc328fb88e1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
}
static void rmnet_map_send_ack(struct sk_buff *skb,
- unsigned char type)
+ unsigned char type,
+ struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
int xmit_status;
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (skb->len < sizeof(struct rmnet_map_header) +
+ RMNET_MAP_GET_LENGTH(skb) +
+ sizeof(struct rmnet_map_dl_csum_trailer)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_trim(skb, skb->len -
+ sizeof(struct rmnet_map_dl_csum_trailer));
+ }
+
skb->protocol = htons(ETH_P_MAP);
cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
- rmnet_map_send_ack(skb, rc);
+ rmnet_map_send_ack(skb, rc, port);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c758f94e..c74a6c56d315 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
*/
#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -21,6 +24,233 @@
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+ const void *txporthdr)
+{
+ __sum16 *check = NULL;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ check = &(((struct tcphdr *)txporthdr)->check);
+ break;
+
+ case IPPROTO_UDP:
+ check = &(((struct udphdr *)txporthdr)->check);
+ break;
+
+ default:
+ check = NULL;
+ break;
+ }
+
+ return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+ u16 csum_value, csum_value_final;
+ struct iphdr *ip4h;
+ void *txporthdr;
+ __be16 addend;
+
+ ip4h = (struct iphdr *)(skb->data);
+ if ((ntohs(ip4h->frag_off) & IP_MF) ||
+ ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+ return -EOPNOTSUPP;
+
+ txporthdr = skb->data + ip4h->ihl * 4;
+
+ csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+ if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+ return 0;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+ ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+ (__force __be16)hdr_csum);
+
+ pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip4h->protocol) {
+ case IPPROTO_UDP:
+ /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL4 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+ u16 csum_value, csum_value_final;
+ __be16 ip6_hdr_csum, addend;
+ struct ipv6hdr *ip6h;
+ void *txporthdr;
+ u32 length;
+
+ ip6h = (struct ipv6hdr *)(skb->data);
+
+ txporthdr = skb->data + sizeof(struct ipv6hdr);
+ csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ ip6_hdr_csum = (__force __be16)
+ ~ntohs((__force __be16)ip_compute_csum(ip6h,
+ (int)(txporthdr - (void *)(skb->data))));
+ ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+ ip6_hdr_csum);
+
+ length = (ip6h->nexthdr == IPPROTO_UDP) ?
+ ntohs(((struct udphdr *)txporthdr)->len) :
+ ntohs(ip6h->payload_len);
+ pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ length, ip6h->nexthdr, 0));
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip6h->nexthdr) {
+ case IPPROTO_UDP:
+ /* RFC 2460 section 8.1
+ * DL6 One's complement rule for UDP checksum 0
+ */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL6 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = iphdr + ip4h->ihl * 4;
+
+ if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)iphdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ if (ip4h->protocol == IPPROTO_UDP)
+ ul_header->udp_ip4_ind = 1;
+ else
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+ if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)ip6hdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
u32 padding, map_datalen;
u8 *padbytes;
- if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
- return NULL;
-
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+ packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
return skbn;
}
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+ struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return -EOPNOTSUPP;
+
+ csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+ if (!csum_trailer->valid)
+ return -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+ return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+ return -EPROTONOSUPPORT;
+#endif
+
+ return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
+{
+ struct rmnet_map_ul_csum_header *ul_header;
+ void *iphdr;
+
+ ul_header = (struct rmnet_map_ul_csum_header *)
+ skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+ if (unlikely(!(orig_dev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+ goto sw_csum;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ return;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ return;
+#else
+ goto sw_csum;
+#endif
+ }
+ }
+
+sw_csum:
+ ul_header->csum_start_offset = 0;
+ ul_header->csum_insert_offset = 0;
+ ul_header->csum_enabled = 0;
+ ul_header->udp_ip4_ind = 0;
+}
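[Editor's note] Both downlink trailer validators above lean on the same ones'-complement algebra: the trailer's csum_value is the complement of the 16-bit sum over the whole IP packet, so subtracting the IP-header sum, adding the pseudo-header sum, and backing out the checksum field stored in the packet must reproduce that stored transport checksum. Stripped of the __force byte-order casts, the core computation is (csum16_add/csum16_sub are from include/net/checksum.h; the local names are illustrative):

	/* all values are ones'-complement 16-bit sums */
	ip_payload_csum = csum16_sub(full_packet_csum, ip_header_csum);
	expected = ~csum16_sub(csum16_add(ip_payload_csum, pseudo_hdr_csum),
			       stored_check_field);

	return (expected == stored_check_field) ? 0 : -EINVAL;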
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index 49102f922b31..de0143eaa05a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -19,14 +19,10 @@
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
-#define RMNET_EGRESS_FORMAT_MAP BIT(1)
-#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2)
-#define RMNET_EGRESS_FORMAT_MUXING BIT(3)
-
-#define RMNET_INGRESS_FORMAT_MAP BIT(1)
-#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2)
-#define RMNET_INGRESS_FORMAT_DEMUXING BIT(3)
-#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9caa5e387450..570a227acdd8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -185,6 +185,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
if (ep->egress_dev)
return -EINVAL;
+ if (rmnet_get_endpoint(port, id))
+ return -EBUSY;
+
+ rmnet_dev->hw_features = NETIF_F_RXCSUM;
+ rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ rmnet_dev->hw_features |= NETIF_F_SG;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e7ab23e87de2..81045dfa1cd8 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss > MSSMask) {
- WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
- mss);
+ netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
+ mss);
goto out_dma_error;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 734286ebe5ef..0bf7d1759250 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
- return RTL_R8(IBISR0) & 0x02;
+ return RTL_R8(IBISR0) & 0x20;
}
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
@@ -1403,7 +1403,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
}
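Both hunks above correct the CMAC stop sequence: the completion condition is bit 5 (0x20) of IBISR0 rather than bit 1, and the driver has to wait for that bit to go high, not low, before acknowledging it. A standalone sketch of the corrected polling; the register read here is purely hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IBISR0_TX_EMPTY 0x20        /* bit 5, per the corrected condition */

/* Hypothetical register read, standing in for RTL_R8(IBISR0). */
static uint8_t read_ibisr0(void)
{
        static int calls;

        return ++calls >= 3 ? IBISR0_TX_EMPTY : 0;
}

/* Poll until the bit goes HIGH, mirroring the switch from
 * rtl_msleep_loop_wait_low() to rtl_msleep_loop_wait_high(). */
static bool wait_tx_empty(int max_tries)
{
        while (max_tries--)
                if (read_ibisr0() & IBISR0_TX_EMPTY)
                        return true;
        return false;               /* the driver would time out here */
}

int main(void)
{
        printf("tx empty: %s\n", wait_tx_empty(2000) ? "yes" : "timeout");
        return 0;
}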
@@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void __rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr, bool pm)
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
{
if (tp->link_ok(ioaddr)) {
rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
- if (pm)
- pm_request_resume(&tp->pci_dev->dev);
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
if (net_ratelimit())
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- if (pm)
- pm_schedule_suspend(&tp->pci_dev->dev, 5000);
+ pm_runtime_idle(&tp->pci_dev->dev);
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr)
-{
- __rtl8169_check_link_status(dev, tp, ioaddr, false);
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -4638,16 +4629,6 @@ static void rtl8169_phy_timer(struct timer_list *t)
rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
-static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
- void __iomem *ioaddr)
-{
- iounmap(ioaddr);
- pci_release_regions(pdev);
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
- free_netdev(dev);
-}
-
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
return tp->phy_reset_pending(tp);
@@ -4779,14 +4760,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data
return -EOPNOTSUPP;
}
-static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
-{
- if (tp->features & RTL_FEATURE_MSI) {
- pci_disable_msi(pdev);
- tp->features &= ~RTL_FEATURE_MSI;
- }
-}
-
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
struct mdio_ops *ops = &tp->mdio_ops;
@@ -7759,7 +7732,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
rtl_irq_enable_all(tp);
}
@@ -7972,7 +7945,7 @@ static int rtl_open(struct net_device *dev)
rtl_unlock_work(tp);
tp->saved_wolopts = 0;
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
rtl8169_check_link_status(dev, tp, ioaddr);
out:
@@ -8116,8 +8089,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
+ if (!tp->TxDescArray) {
+ rtl_pll_power_down(tp);
return 0;
+ }
rtl_lock_work(tp);
tp->saved_wolopts = __rtl8169_get_wol(tp);
@@ -8159,9 +8134,11 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- return tp->TxDescArray ? -EBUSY : 0;
+ if (!netif_running(dev) || !netif_carrier_ok(dev))
+ pm_schedule_suspend(device, 10000);
+
+ return -EBUSY;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
@@ -8208,9 +8185,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = &pdev->dev;
-
- pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
@@ -8228,8 +8202,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
-
- pm_runtime_put_noidle(d);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -8251,9 +8223,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
- tp->counters, tp->counters_phys_addr);
-
rtl_release_firmware(tp);
if (pci_dev_run_wake(pdev))
@@ -8261,9 +8230,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
-
- rtl_disable_msi(pdev, tp);
- rtl8169_release_board(pdev, dev, tp->mmio_addr);
}
static const struct net_device_ops rtl_netdev_ops = {
@@ -8440,11 +8406,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
MODULENAME, RTL8169_VERSION);
}
- dev = alloc_etherdev(sizeof (*tp));
- if (!dev) {
- rc = -ENOMEM;
- goto out;
- }
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
+ if (!dev)
+ return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &rtl_netdev_ops;
@@ -8467,13 +8431,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
PCIE_LINK_STATE_CLKPM);
/* enable device (incl. PCI PM wakeup and hotplug setup) */
- rc = pci_enable_device(pdev);
+ rc = pcim_enable_device(pdev);
if (rc < 0) {
netif_err(tp, probe, dev, "enable failure\n");
- goto err_out_free_dev_1;
+ return rc;
}
- if (pci_set_mwi(pdev) < 0)
+ if (pcim_set_mwi(pdev) < 0)
netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
/* make sure PCI base addr 1 is MMIO */
@@ -8481,30 +8445,28 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_err(tp, probe, dev,
"region #%d not an MMIO resource, aborting\n",
region);
- rc = -ENODEV;
- goto err_out_mwi_2;
+ return -ENODEV;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
netif_err(tp, probe, dev,
"Invalid PCI region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out_mwi_2;
+ return -ENODEV;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
netif_err(tp, probe, dev, "could not request regions\n");
- goto err_out_mwi_2;
+ return rc;
}
/* ioremap MMIO region */
- ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region),
+ R8169_REGS_SIZE);
if (!ioaddr) {
netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out_free_res_3;
+ return -EIO;
}
tp->mmio_addr = ioaddr;
@@ -8530,7 +8492,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_unmap_4;
+ return rc;
}
}
@@ -8692,16 +8654,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
- tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
- &tp->counters_phys_addr, GFP_KERNEL);
- if (!tp->counters) {
- rc = -ENOMEM;
- goto err_out_msi_5;
- }
+ tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
+ &tp->counters_phys_addr,
+ GFP_KERNEL);
+ if (!tp->counters)
+ return -ENOMEM;
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_cnt_6;
+ return rc;
pci_set_drvdata(pdev, dev);
@@ -8725,30 +8686,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
-
netif_carrier_off(dev);
-out:
- return rc;
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_sync(&pdev->dev);
-err_out_cnt_6:
- dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
- tp->counters_phys_addr);
-err_out_msi_5:
- netif_napi_del(&tp->napi);
- rtl_disable_msi(pdev, tp);
-err_out_unmap_4:
- iounmap(ioaddr);
-err_out_free_res_3:
- pci_release_regions(pdev);
-err_out_mwi_2:
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
-err_out_free_dev_1:
- free_netdev(dev);
- goto out;
+ return 0;
}
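The whole err_out_* label chain can go because every resource in rtl_init_one() is now device-managed: devm_alloc_etherdev(), pcim_enable_device(), devm_ioremap() and dmam_alloc_coherent() all register with the driver core, which releases them in reverse order on probe failure or device removal. A userspace model of that unwinding (not the real devres API):

#include <stdio.h>

/* Userspace model of devres: managed resources are recorded in order
 * and released in reverse, which is why rtl_init_one() no longer needs
 * its error-label chain. */
#define MAX_RES 8

static const char *res[MAX_RES];
static int nres;

static void devm_track(const char *name)
{
        res[nres++] = name;
        printf("acquired %s\n", name);
}

static void devm_release_all(void)
{
        while (nres--)
                printf("released %s\n", res[nres]);
}

int main(void)
{
        devm_track("etherdev");       /* devm_alloc_etherdev() */
        devm_track("pci device");     /* pcim_enable_device() */
        devm_track("mmio mapping");   /* devm_ioremap() */
        devm_track("dma counters");   /* dmam_alloc_coherent() */

        /* Probe failure or driver detach: core unwinds automatically. */
        devm_release_all();
        return 0;
}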
static struct pci_driver rtl8169_pci_driver = {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 009780df664b..c87f57ca4437 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2205,8 +2205,7 @@ out_dma_free:
if (chip_id != RCAR_GEN2)
ravb_ptp_stop(ndev);
out_release:
- if (ndev)
- free_netdev(ndev);
+ free_netdev(ndev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 53924a4fc31c..a197e11f3a56 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3125,7 +3125,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
struct sh_eth_private *mdp;
struct net_device *ndev;
- int ret, devno;
+ int ret;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -3137,10 +3137,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- devno = pdev->id;
- if (devno < 0)
- devno = 0;
-
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out_release;
@@ -3222,8 +3218,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- /* ioremap the TSU registers */
if (mdp->cd->tsu) {
+ int port = pdev->id < 0 ? 0 : pdev->id % 2;
struct resource *rtsu;
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -3235,7 +3231,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* We can only request the TSU region for the first port
* of the two sharing this TSU for the probe to succeed...
*/
- if (devno % 2 == 0 &&
+ if (port == 0 &&
!devm_request_mem_region(&pdev->dev, rtsu->start,
resource_size(rtsu),
dev_name(&pdev->dev))) {
@@ -3243,6 +3239,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ret = -EBUSY;
goto out_release;
}
+ /* ioremap the TSU registers */
mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
resource_size(rtsu));
if (!mdp->tsu_addr) {
@@ -3250,16 +3247,14 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto out_release;
}
- mdp->port = devno % 2;
+ mdp->port = port;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- /* Need to init only the first port of the two sharing a TSU */
- if (devno % 2 == 0) {
- if (mdp->cd->chip_reset)
- mdp->cd->chip_reset(ndev);
+ /* Need to init only the first port of the two sharing a TSU */
+ if (port == 0) {
+ if (mdp->cd->chip_reset)
+ mdp->cd->chip_reset(ndev);
- if (mdp->cd->tsu) {
/* TSU init (Init only)*/
sh_eth_tsu_init(mdp);
}
@@ -3301,8 +3296,7 @@ out_napi_del:
out_release:
/* net_dev free */
- if (ndev)
- free_netdev(ndev);
+ free_netdev(ndev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
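The sh_eth rework above folds the old devno handling into a single port value computed once: a negative platform-device id counts as 0, otherwise the id modulo 2 selects which of the two ports sharing a TSU this is, and only port 0 requests the TSU region and runs the chip reset. A quick check of the arithmetic:

#include <stdio.h>

/* Port selection as in sh_eth_drv_probe(): two ports share one TSU and
 * only the even-numbered one initialises it. */
static int tsu_port(int pdev_id)
{
        return pdev_id < 0 ? 0 : pdev_id % 2;
}

int main(void)
{
        /* id -1 (single instance) and id 2 both act as port 0;
         * id 1 and id 3 are the second port and skip TSU init. */
        for (int id = -1; id <= 3; id++)
                printf("pdev->id %d -> port %d\n", id, tsu_port(id));
        return 0;
}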
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 6d6fb8cf3e7c..6473cc68c2d5 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -789,7 +789,6 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
ofdpa_flags_nowait(flags),
ofdpa_cmd_flow_tbl_add,
found, NULL, NULL);
- return 0;
}
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e566dbb3343d..75fbf58e421c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -160,11 +160,31 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
+/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
+ * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
+ * BAR; PFs use BAR 0/1 for memory.
+ */
+static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
+{
+ switch (efx->pci_dev->device) {
+ case 0x0b03: /* SFC9250 PF */
+ return 0;
+ default:
+ return 2;
+ }
+}
+
+/* All VFs use BAR 0/1 for memory */
+static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
+{
+ return 0;
+}
+
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
int bar;
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
return resource_size(&efx->pci_dev->resource[bar]);
}
@@ -213,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
@@ -257,9 +277,70 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
return -ENODEV;
}
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+ u8 vi_window_mode = MCDI_BYTE(outbuf,
+ GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+ switch (vi_window_mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ efx->vi_stride = 8192;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ efx->vi_stride = 16384;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ efx->vi_stride = 65536;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev,
+ "Unrecognised VI window mode %d\n",
+ vi_window_mode);
+ return -EIO;
+ }
+ netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+ efx->vi_stride);
+ } else {
+ /* keep default VI stride */
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware did not report VI window mode, assuming vi_stride = %u\n",
+ efx->vi_stride);
+ }
+
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ efx->num_mac_stats = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware reports num_mac_stats = %u\n",
+ efx->num_mac_stats);
+ } else {
+ /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware did not report num_mac_stats, assuming %u\n",
+ efx->num_mac_stats);
+ }
+
return 0;
}
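The stride selected here feeds directly into the channel budget that efx_ef10_probe() now computes after reading the capabilities: max_channels = mem_map_size / (vi_stride * EFX_TXQ_TYPES). A worked example; the 16 MiB BAR and the four TX queue types are illustrative values:

#include <stdio.h>

/* Channel budget as recomputed in efx_ef10_probe(); BAR size and
 * TXQ-type count below are illustrative, not read from hardware. */
int main(void)
{
        unsigned long long mem_map_size = 16ULL << 20; /* 16 MiB BAR */
        unsigned int txq_types = 4;                    /* EFX_TXQ_TYPES */
        unsigned int strides[] = { 8192, 16384, 65536 };

        for (int i = 0; i < 3; i++)
                printf("vi_stride %6u -> max_channels %llu\n", strides[i],
                       mem_map_size / (strides[i] * txq_types));
        return 0;
}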
+static void efx_ef10_read_licensed_features(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
+ return;
+
+ nic_data->licensed_features = MCDI_QWORD(outbuf,
+ LICENSING_V3_OUT_LICENSED_FEATURES);
+}
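The cached licensed_features bitmap is consumed later by efx_ef10_tx_init(), which tests one LBN-numbered bit before enabling TXQ timestamping. A sketch of that test; the bit position used here is made up and merely stands in for LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit position; the real value comes from mcdi_pcol.h. */
#define TX_TIMESTAMPS_LBN 8

static int feature_licensed(uint64_t licensed_features, unsigned int lbn)
{
        return !!(licensed_features & (1ULL << lbn));
}

int main(void)
{
        uint64_t features = 1ULL << TX_TIMESTAMPS_LBN;

        /* efx_ef10_tx_init() disables TXQ timestamping when this is 0 */
        printf("tx timestamps licensed: %d\n",
               feature_licensed(features, TX_TIMESTAMPS_LBN));
        return 0;
}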
+
static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
@@ -589,17 +670,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
- /* We can have one VI for each 8K region. However, until we
- * use TX option descriptors we need two TX queues per channel.
- */
- efx->max_channels = min_t(unsigned int,
- EFX_MAX_CHANNELS,
- efx_ef10_mem_map_size(efx) /
- (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
- efx->max_tx_channels = efx->max_channels;
- if (WARN_ON(efx->max_channels == 0))
- return -EIO;
-
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
return -ENOMEM;
@@ -671,6 +741,22 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc < 0)
goto fail5;
+ efx_ef10_read_licensed_features(efx);
+
+ /* We can have one VI for each vi_stride-byte region.
+ * However, until we use TX option descriptors we need two TX queues
+ * per channel.
+ */
+ efx->max_channels = min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ efx_ef10_mem_map_size(efx) /
+ (efx->vi_stride * EFX_TXQ_TYPES));
+ efx->max_tx_channels = efx->max_channels;
+ if (WARN_ON(efx->max_channels == 0)) {
+ rc = -EIO;
+ goto fail5;
+ }
+
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
@@ -695,7 +781,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc && rc != -EPERM)
goto fail5;
- efx_ptp_probe(efx, NULL);
+ efx_ptp_defer_probe_with_channel(efx);
#ifdef CONFIG_SFC_SRIOV
if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
@@ -865,6 +951,11 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
/* Link a buffer to each TX queue */
efx_for_each_channel(channel, efx) {
+ /* Extra channels, even those with TXQs (PTP), do not require
+ * PIO resources.
+ */
+ if (!channel->type->want_pio)
+ continue;
efx_for_each_channel_tx_queue(tx_queue, channel) {
/* We assign the PIO buffers to queues in
* reverse order to allow for the following
@@ -907,7 +998,7 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
} else {
tx_queue->piobuf =
nic_data->pio_write_base +
- index * EFX_VI_PAGE_SIZE + offset;
+ index * efx->vi_stride + offset;
tx_queue->piobuf_offset = offset;
netif_dbg(efx, probe, efx->net_dev,
"linked VI %u to PIO buffer %u offset %x addr %p\n",
@@ -1212,7 +1303,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
void __iomem *membase;
int rc;
- channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ channel_vis = max(efx->n_channels,
+ (efx->n_tx_channels + efx->n_extra_tx_channels) *
+ EFX_TXQ_TYPES);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -1253,19 +1346,19 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* for writing PIO buffers through.
*
* The UC mapping contains (channel_vis - 1) complete VIs and the
- * first half of the next VI. Then the WC mapping begins with
- * the second half of this last VI.
+ * first 4K of the next VI. Then the WC mapping begins with
+ * the remainder of this last VI.
*/
- uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
+ uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
ER_DZ_TX_PIOBUF);
if (nic_data->n_piobufs) {
/* pio_write_vi_base rounds down to give the number of complete
* VIs inside the UC mapping.
*/
- pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
nic_data->n_piobufs) *
- EFX_VI_PAGE_SIZE) -
+ efx->vi_stride) -
uc_mem_map_size);
max_vis = pio_write_vi_base + nic_data->n_piobufs;
} else {
@@ -1337,7 +1430,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
nic_data->pio_write_vi_base = pio_write_vi_base;
nic_data->pio_write_base =
nic_data->wc_membase +
- (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+ (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
uc_mem_map_size);
rc = efx_ef10_link_piobufs(efx);
@@ -1571,6 +1664,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
+ EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
+ EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
+ EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
+ EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
+ EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
+ EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
+ EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
+ EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
+ EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
+ EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
+ EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
+ EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
+ EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
+ EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
+ EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
+ EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
+ EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
+ EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
+ EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
+ EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
+ EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
+ EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
+ EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
@@ -1646,6 +1762,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
(1ULL << EF10_STAT_port_rx_dp_hlb_wait))
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_FEC_STAT_MASK ( \
+ (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
+
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_CTPIO_STAT_MASK ( \
+ (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_success - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_poison - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_erase - 64)))
+
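The `- 64` in both masks encodes where these stats land in the driver's two-u64 raw mask: index / 64 picks the word and index % 64 the bit, so every stat numbered 64 or higher lives in raw_mask[1]. A small sketch; the index value is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Two-word stat mask as handled by efx_ef10_get_stat_mask(); the
 * index below is hypothetical. */
static void mask_set(uint64_t raw_mask[2], unsigned int stat_index)
{
        raw_mask[stat_index / 64] |= 1ULL << (stat_index % 64);
}

int main(void)
{
        uint64_t raw_mask[2] = { 0, 0 };

        mask_set(raw_mask, 70); /* e.g. a FEC stat in the second word */
        printf("mask[0]=%#llx mask[1]=%#llx\n",
               (unsigned long long)raw_mask[0],
               (unsigned long long)raw_mask[1]);
        return 0;
}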
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
u64 raw_mask = HUNT_COMMON_STAT_MASK;
@@ -1684,10 +1837,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
- raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
} else {
raw_mask[1] = 0;
}
+ /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
+ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
+ raw_mask[1] |= EF10_FEC_STAT_MASK;
+
+ /* CTPIO stats appear in V3. Only show them on devices that actually
+	 * support CTPIO. Although this driver doesn't use CTPIO, others might,
+ * and we may be reporting the stats for the underlying port.
+ */
+ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
+ (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
+ raw_mask[1] |= EF10_CTPIO_STAT_MASK;
#if BITS_PER_LONG == 64
BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
@@ -1791,7 +1956,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
dma_stats = efx->stats_buffer.addr;
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
@@ -1839,7 +2004,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
__le64 generation_start, generation_end;
u64 *stats = nic_data->stats;
- u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ u32 dma_len = efx->num_mac_stats * sizeof(u64);
struct efx_buffer stats_buf;
__le64 *dma_stats;
int rc;
@@ -1864,7 +2029,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
}
dma_stats = stats_buf.addr;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
@@ -1883,7 +2048,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
goto out;
}
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
WARN_ON_ONCE(1);
goto out;
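Both the PF and VF update paths now rely on the same consistency protocol: the firmware writes a generation marker at each end of the DMA buffer, with the end marker in slot num_mac_stats - 1, and the driver retries when the two reads disagree. A userspace model of the check, assuming for illustration that the start marker occupies slot 0:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define NSTATS 8                     /* stands in for efx->num_mac_stats */

/* Torn-snapshot check as in efx_ef10_try_update_nic_stats_*(): the end
 * marker is the last slot; the start marker is assumed (for this
 * model) to be slot 0. The driver issues rmb() between the reads. */
static bool stats_snapshot(const uint64_t *dma_stats, uint64_t *out)
{
        uint64_t generation_end = dma_stats[NSTATS - 1];

        memcpy(out, dma_stats, NSTATS * sizeof(*out));
        return dma_stats[0] == generation_end;   /* false => retry */
}

int main(void)
{
        uint64_t dma[NSTATS] = { 7, 1, 2, 3, 4, 5, 6, 7 };
        uint64_t snap[NSTATS];

        printf("consistent: %d\n", stats_snapshot(dma, snap));
        return 0;
}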
@@ -1951,8 +2116,9 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
} else {
unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
- EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
- ERF_DZ_TC_TIMER_VAL, ticks);
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, ticks,
+ ERF_FZ_TC_TMR_REL_VAL, ticks);
efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
channel->channel);
}
@@ -2263,12 +2429,25 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
int i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+ /* Only attempt to enable TX timestamping if we have the license for it,
+ * otherwise TXQ init will fail
+ */
+ if (!(nic_data->licensed_features &
+ (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
+ tx_queue->timestamping = false;
+ /* Disable sync events on this channel. */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, false);
+ }
+
/* TSOv2 is a limited resource that can only be configured on a limited
* number of queues. TSO without checksum offload is not really a thing,
* so we only enable it for those queues.
+	 * TSOv2 cannot be used with hardware timestamping.
*/
if (csum_offload && (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) {
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
+ !tx_queue->timestamping) {
tso_v2 = true;
netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
channel->channel);
@@ -2294,14 +2473,16 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
- MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS,
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
- INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
+ tx_queue->timestamping);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
@@ -2327,12 +2508,13 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
tx_queue->insert_count = 1;
txd = efx_tx_desc(tx_queue, 0);
- EFX_POPULATE_QWORD_4(*txd,
+ EFX_POPULATE_QWORD_5(*txd,
ESF_DZ_TX_DESC_IS_OPT, true,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
- ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
+ ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
tx_queue->write_count = 1;
if (tso_v2) {
@@ -3233,8 +3415,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
- (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
- rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
+ (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
netdev_WARN(efx->net_dev,
"invalid class for RX_TCPUDP_CKSUM_ERR: event="
EFX_QWORD_FMT "\n",
@@ -3271,8 +3453,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
EFX_QWORD_VAL(*event));
else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
- (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
- rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
+ (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
netdev_WARN(efx->net_dev,
"invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
EFX_QWORD_FMT "\n",
@@ -3307,7 +3489,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
- rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
rx_encap_hdr =
nic_data->datapath_caps &
@@ -3385,8 +3567,8 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_l3_class, rx_l4_class,
event);
} else {
- bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
- rx_l4_class == ESE_DZ_L4_CLASS_UDP;
+ bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_FZ_L4_CLASS_UDP;
switch (rx_encap_hdr) {
case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
@@ -3407,7 +3589,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
}
}
- if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
flags |= EFX_RX_PKT_TCP;
channel->irq_mod_score += 2 * n_packets;
@@ -3427,31 +3609,92 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
return n_packets;
}
-static int
+static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
+{
+ u32 tstamp;
+
+ tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
+ tstamp <<= 16;
+ tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
+
+ return tstamp;
+}
+
+static void
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_tx_queue *tx_queue;
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
- int tx_descs = 0;
+ unsigned int tx_ev_type;
+ u64 ts_part;
if (unlikely(READ_ONCE(efx->reset_pending)))
- return 0;
+ return;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
- return 0;
+ return;
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ /* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
tx_queue = efx_channel_get_tx_queue(channel,
tx_ev_q_label % EFX_TXQ_TYPES);
- tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
- tx_queue->ptr_mask);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
- return tx_descs;
+ if (!tx_queue->timestamping) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+ return;
+ }
+
+	/* Transmit timestamps are only available for the 8XXX series. They result
+ * in three events per packet. These occur in order, and are:
+ * - the normal completion event
+ * - the low part of the timestamp
+ * - the high part of the timestamp
+ *
+ * Each part of the timestamp is itself split across two 16 bit
+ * fields in the event.
+ */
+ tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
+
+ switch (tx_ev_type) {
+ case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
+		/* In case of a queue flush or FLR, we might have received
+		 * the previous TX completion event but not the timestamp
+		 * events.
+ */
+ if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
+ efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
+ ESF_DZ_TX_DESCR_INDX);
+ tx_queue->completed_desc_ptr =
+ tx_ev_desc_ptr & tx_queue->ptr_mask;
+ break;
+
+ case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
+ ts_part = efx_ef10_extract_event_ts(event);
+ tx_queue->completed_timestamp_minor = ts_part;
+ break;
+
+ case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
+ ts_part = efx_ef10_extract_event_ts(event);
+ tx_queue->completed_timestamp_major = ts_part;
+
+ efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ break;
+
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown tx event type %d (data "
+ EFX_QWORD_FMT ")\n",
+ channel->channel, tx_ev_type,
+ EFX_QWORD_VAL(*event));
+ break;
+ }
}
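Each 32-bit timestamp part is itself split across two 16-bit event fields, which is exactly what efx_ef10_extract_event_ts() reassembles; the TSTAMP_LO and TSTAMP_HI events then supply the minor and major halves of the full timestamp. A worked example with hypothetical field values:

#include <stdint.h>
#include <stdio.h>

/* Reassembly as in efx_ef10_extract_event_ts(): each 32-bit timestamp
 * part is carried in two 16-bit event fields. */
static uint32_t extract_ts_part(uint16_t data_hi, uint16_t data_lo)
{
        return ((uint32_t)data_hi << 16) | data_lo;
}

int main(void)
{
        /* Hypothetical field values from the TSTAMP_LO and TSTAMP_HI
         * events; the full timestamp is the (major, minor) pair. */
        uint32_t minor = extract_ts_part(0x1234, 0x5678);
        uint32_t major = extract_ts_part(0x0000, 0x5a5a);

        printf("major=%#x minor=%#x\n", major, minor);
        return 0;
}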
static void
@@ -3513,7 +3756,6 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
efx_qword_t event, *p_event;
unsigned int read_ptr;
int ev_code;
- int tx_descs = 0;
int spent = 0;
if (quota <= 0)
@@ -3553,13 +3795,7 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
}
break;
case ESE_DZ_EV_CODE_TX_EV:
- tx_descs += efx_ef10_handle_tx_event(channel, &event);
- if (tx_descs > efx->txq_entries) {
- spent = quota;
- goto out;
- } else if (++spent == quota) {
- goto out;
- }
+ efx_ef10_handle_tx_event(channel, &event);
break;
case ESE_DZ_EV_CODE_DRIVER_EV:
efx_ef10_handle_driver_event(channel, &event);
@@ -6034,7 +6270,8 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
efx_ef10_rx_enable_timestamping :
efx_ef10_rx_disable_timestamping;
- efx_for_each_channel(channel, efx) {
+ channel = efx_ptp_channel(efx);
+ if (channel) {
int rc = set(channel, temp);
if (en && rc != 0) {
efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
@@ -6392,7 +6629,7 @@ out_unlock:
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
- .mem_bar = EFX_MEM_VF_BAR,
+ .mem_bar = efx_ef10_vf_mem_bar,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe_vf,
.remove = efx_ef10_remove,
@@ -6500,7 +6737,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
const struct efx_nic_type efx_hunt_a0_nic_type = {
.is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+ .mem_bar = efx_ef10_pf_mem_bar,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 2c4bf9476c37..6a56778cf06c 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
- * Copyright 2012-2015 Solarflare Communications Inc.
+ * Copyright 2012-2017 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -79,6 +79,8 @@
#define ER_DZ_EVQ_TMR 0x00000420
#define ER_DZ_EVQ_TMR_STEP 8192
#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
#define ERF_DZ_TC_TIMER_MODE_LBN 14
#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
#define ERF_DZ_TC_TIMER_VAL_LBN 0
@@ -159,16 +161,24 @@
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
-#define ESF_DZ_RX_L4_CLASS_LBN 45
-#define ESF_DZ_RX_L4_CLASS_WIDTH 3
-#define ESE_DZ_L4_CLASS_RSVD7 7
-#define ESE_DZ_L4_CLASS_RSVD6 6
-#define ESE_DZ_L4_CLASS_RSVD5 5
-#define ESE_DZ_L4_CLASS_RSVD4 4
-#define ESE_DZ_L4_CLASS_RSVD3 3
-#define ESE_DZ_L4_CLASS_UDP 2
-#define ESE_DZ_L4_CLASS_TCP 1
-#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DE_RX_L4_CLASS_LBN 45
+#define ESF_DE_RX_L4_CLASS_WIDTH 3
+#define ESE_DE_L4_CLASS_RSVD7 7
+#define ESE_DE_L4_CLASS_RSVD6 6
+#define ESE_DE_L4_CLASS_RSVD5 5
+#define ESE_DE_L4_CLASS_RSVD4 4
+#define ESE_DE_L4_CLASS_RSVD3 3
+#define ESE_DE_L4_CLASS_UDP 2
+#define ESE_DE_L4_CLASS_TCP 1
+#define ESE_DE_L4_CLASS_UNKNOWN 0
+#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define ESF_FZ_RX_L4_CLASS_LBN 45
+#define ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define ESE_FZ_L4_CLASS_RSVD3 3
+#define ESE_FZ_L4_CLASS_UDP 2
+#define ESE_FZ_L4_CLASS_TCP 1
+#define ESE_FZ_L4_CLASS_UNKNOWN 0
#define ESF_DZ_RX_L3_CLASS_LBN 42
#define ESF_DZ_RX_L3_CLASS_WIDTH 3
#define ESE_DZ_L3_CLASS_RSVD7 7
@@ -215,6 +225,8 @@
#define ESF_EZ_RX_ABORT_WIDTH 1
#define ESF_DZ_RX_ECC_ERR_LBN 29
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1
#define ESF_DZ_RX_CRC1_ERR_LBN 28
#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
#define ESF_DZ_RX_CRC0_ERR_LBN 27
@@ -332,6 +344,8 @@
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -341,7 +355,7 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2A_DESC */
+/* TX_TSO_V2_DESC_A */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -360,8 +374,7 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-
-/* TX_TSO_FATSO2B_DESC */
+/* TX_TSO_V2_DESC_B */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -375,11 +388,10 @@
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-
+#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
/*************************************************************************/
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e3c492fcaff0..16757cfc5b29 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -27,6 +27,7 @@
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
+#include "io.h"
#include "selftest.h"
#include "sriov.h"
@@ -895,12 +896,20 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
static const struct efx_channel_type efx_default_channel_type = {
.pre_probe = efx_channel_dummy_op_int,
.post_remove = efx_channel_dummy_op_void,
.get_name = efx_get_channel_name,
.copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
.keep_eventq = false,
+ .want_pio = true,
};
int efx_channel_dummy_op_int(struct efx_channel *channel)
@@ -952,31 +961,42 @@ void efx_link_status_changed(struct efx_nic *efx)
netif_info(efx, link, efx->net_dev, "link down\n");
}
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
{
- efx->link_advertising = advertising;
- if (advertising) {
- if (advertising & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
- }
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except it does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+ bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
- if (efx->link_advertising) {
+ if (efx->link_advertising[0]) {
if (wanted_fc & EFX_FC_RX)
- efx->link_advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
else
- efx->link_advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
- efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
}
}
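link_advertising is now an ethtool link-mode bitmap rather than a bare u32, so the legacy ADVERTISED_* flags all live in word 0 of the mask. A standalone sketch of the pause-flag manipulation on that word, mirroring the logic in efx_link_set_wanted_fc() (flag values as defined in linux/ethtool.h):

#include <stdio.h>

/* Legacy ethtool advertising bits, values as in linux/ethtool.h. */
#define ADVERTISED_Pause      (1 << 13)
#define ADVERTISED_Asym_Pause (1 << 14)

#define FC_RX 1
#define FC_TX 2

/* Word-0 update mirroring efx_link_set_wanted_fc() after the bitmap
 * conversion. */
static void apply_fc(unsigned long *adv0, unsigned int wanted_fc)
{
        if (wanted_fc & FC_RX)
                *adv0 |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
        else
                *adv0 &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
        if (wanted_fc & FC_TX)
                *adv0 ^= ADVERTISED_Asym_Pause;
}

int main(void)
{
        unsigned long adv0 = 0;

        apply_fc(&adv0, FC_RX);          /* symmetric pause: 0x6000 */
        printf("adv0=%#lx\n", adv0);
        apply_fc(&adv0, FC_RX | FC_TX);  /* both directions: 0x2000 */
        printf("adv0=%#lx\n", adv0);
        return 0;
}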
@@ -1248,7 +1268,7 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
rc = pci_enable_device(pci_dev);
if (rc) {
@@ -1323,7 +1343,7 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
@@ -1489,6 +1509,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* Assign extra channels if possible */
+ efx->n_extra_tx_channels = 0;
j = efx->n_channels;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
if (!efx->extra_channel_type[i])
@@ -1500,6 +1521,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
--j;
efx_get_channel(efx, j)->type =
efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
}
}
@@ -2909,6 +2932,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{0} /* end of list */
};
@@ -2977,6 +3004,9 @@ static int efx_init_struct(struct efx_nic *efx,
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 52c84b782901..0cddc5ad77b1 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,11 +14,6 @@
#include "net_driver.h"
#include "filter.h"
-/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
-/* All VFs use BAR 0/1 for memory */
-#define EFX_MEM_BAR 2
-#define EFX_MEM_VF_BAR 0
-
int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
@@ -263,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
}
void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3747b5644110..4db2dc2bf52f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
netif_dbg(efx, drv, efx->net_dev,
"Autonegotiation is disabled\n");
rc = -EINVAL;
@@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
efx->type->prepare_enable_fc_tx(efx);
- old_adv = efx->link_advertising;
+ old_adv = efx->link_advertising[0];
old_fc = efx->wanted_fc;
efx_link_set_wanted_fc(efx, wanted_fc);
- if (efx->link_advertising != old_adv ||
+ if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5334dc83d926..266b9bee1f3a 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -818,17 +818,16 @@ static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static int
+static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
- int tx_packets = 0;
if (unlikely(READ_ONCE(efx->reset_pending)))
- return 0;
+ return;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
@@ -836,8 +835,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
- tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- tx_queue->ptr_mask);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -856,8 +853,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
-
- return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -1090,7 +1085,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
qid % EFX_TXQ_TYPES);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
@@ -1270,7 +1265,6 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int tx_packets = 0;
int spent = 0;
if (budget <= 0)
@@ -1304,12 +1298,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
- tx_packets += efx_farch_handle_tx_event(channel,
- &event);
- if (tx_packets > efx->txq_entries) {
- spent = budget;
- goto out;
- }
+ efx_farch_handle_tx_event(channel, &event);
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_farch_handle_generated_event(channel, &event);
@@ -1680,20 +1669,21 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
*/
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
- unsigned vi_count, buftbl_min;
+ unsigned vi_count, buftbl_min, total_tx_channels;
#ifdef CONFIG_SFC_SRIOV
struct siena_nic_data *nic_data = efx->nic_data;
#endif
+ total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index afb94aa2c15e..89563170af52 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -222,18 +222,21 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Page size used as step between per-VI registers */
-#define EFX_VI_PAGE_SIZE 0x2000
+/* default VI stride (step between per-VI registers) is 8K */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
/* Calculate offset to page-mapped register */
-#define EFX_PAGED_REG(page, reg) \
- ((page) * EFX_VI_PAGE_SIZE + (reg))
+static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
+ unsigned int reg)
+{
+ return page * efx->vi_stride + reg;
+}
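With the stride a per-NIC value, a page-mapped register's offset is simply page * vi_stride + reg. Worked numbers for an example register at offset 0x420 (the EVQ timer):

#include <stdio.h>

/* Offset computation as in efx_paged_reg(); the register used as an
 * example is ER_DZ_EVQ_TMR (0x420). */
static unsigned int paged_reg(unsigned int vi_stride, unsigned int page,
                              unsigned int reg)
{
        return page * vi_stride + reg;
}

int main(void)
{
        /* VI 3's timer register at the default 8K stride vs 64K */
        printf("%#x\n", paged_reg(0x2000, 3, 0x420));  /* 0x6420 */
        printf("%#x\n", paged_reg(0x10000, 3, 0x420)); /* 0x30420 */
        return 0;
}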
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int page)
{
- reg = EFX_PAGED_REG(page, reg);
+ reg = efx_paged_reg(efx, page, reg);
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EFX_OWORD_FMT "\n", reg,
@@ -262,7 +265,7 @@ static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg, unsigned int page)
{
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
@@ -288,10 +291,10 @@ static inline void _efx_writed_page_locked(struct efx_nic *efx,
if (page == 0) {
spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
spin_unlock_irqrestore(&efx->biu_lock, flags);
} else {
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
}
#define efx_writed_page_locked(efx, value, reg, page) \
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 154ef41d1927..ebd95972ae7b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -208,6 +208,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
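MCDI_BYTE follows the established MCDI_WORD pattern: BUILD_BUG_ON_ZERO rejects at compile time any field whose declared length is not exactly one byte. A userspace rendering of the idiom, relying on the negative-width-bitfield trick accepted by GCC and Clang:

#include <stdio.h>

/* Userspace rendering of the kernel's BUILD_BUG_ON_ZERO: a negative
 * bitfield width makes compilation fail when the condition is true,
 * and the whole expression evaluates to 0 otherwise. */
#define BUILD_BUG_ON_ZERO(e) ((int)sizeof(struct { int:(-!!(e)); }))

#define FIELD_LEN 1 /* stands in for a MC_CMD_..._LEN constant */

#define GET_BYTE(buf) \
        ((void)BUILD_BUG_ON_ZERO(FIELD_LEN != 1), \
         *(const unsigned char *)(buf))

int main(void)
{
        unsigned char buf[4] = { 0xab, 0, 0, 0 };

        printf("%#x\n", GET_BYTE(buf)); /* compiles only if LEN == 1 */
        return 0;
}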
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 91fb54fd03d9..869d76f8f589 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -114,6 +114,8 @@
#define MCDI_HEADER_XFLAGS_WIDTH 8
/* Request response using event */
#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
/* Maximum number of payload bytes */
#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
@@ -123,7 +125,7 @@
/* The MC can generate events for two reasons:
- * - To complete a shared memory request if XFLAGS_EVREQ was set
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
* - As a notification (link state, i2c event), controlled
* via MC_CMD_LOG_CTRL
*
@@ -279,6 +281,17 @@
/* Returned by MC_CMD_TESTASSERT if the action that should
* have caused an assertion failed to do so. */
#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
#define MC_CMD_ERR_CODE_OFST 0
@@ -360,6 +373,7 @@
/* enum: Fatal. */
#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -370,6 +384,8 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
/* enum: 100Mbs */
#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
/* enum: 1Gbs */
@@ -378,6 +394,12 @@
#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
/* enum: 40Gbs */
#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -456,8 +478,63 @@
#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
/* enum: PTP status update */
#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by the
+ * driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
@@ -480,6 +557,22 @@
#define MCDI_EVENT_MUM_WATCHDOG 0x3
#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -552,73 +645,99 @@
* been processed and it may now resend the command
*/
#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
* timestamp
*/
#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
* timestamp
*/
#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
#define MCDI_EVENT_PTP_MAJOR_LBN 0
#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
* of timestamp
*/
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
* timestamp
*/
#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
#define MCDI_EVENT_PTP_MINOR_LBN 0
#define MCDI_EVENT_PTP_MINOR_WIDTH 32
/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
*/
#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
#define MCDI_EVENT_RX_ERR_DATA_LBN 0
#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
/* For CODE_PTP_TIME events, the major value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
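
/* [Editorial sketch, not part of the diff: a CODE_PTP_TIME event carries
 * only the most significant bits of the minor time value, so the host
 * rebuilds the full minor value around a recently read NIC time. Assumed
 * reconstruction logic, shown for the 8-bit MS_8BITS variant.]
 */
static unsigned int ptp_widen_minor(unsigned int last_full_minor,
				    unsigned int ms_bits,
				    unsigned int minor_width /* e.g. 27 */)
{
	unsigned int shift = minor_width - 8;
	unsigned int mask = 0xffu << shift;

	/* Substitute the reported top bits into the last known value;
	 * wrap-around between the two readings must be handled separately.
	 */
	return (last_full_minor & ~mask) | (ms_bits << shift);
}
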
/* For CODE_PTP_TIME events where report sync status is enabled, indicates
* whether the NIC clock has ever been set
*/
@@ -634,10 +753,17 @@
*/
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
/* Zero means that the request has been completed or authorized, and the driver
@@ -646,6 +772,10 @@
*/
#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
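
/* [Editorial sketch, not part of the diff: the new DBRET event lets a
 * driver distinguish "request accepted, doorbell free for the next
 * command" from "request finished" (CODE_CMDDONE, 0x1, defined earlier
 * in this header), matching the two by their 8-bit sequence numbers.
 * Hypothetical handler shape.]
 */
static void mcdi_ev_dispatch(unsigned int code, unsigned int data)
{
	switch (code) {
	case MCDI_EVENT_CODE_DBRET:
		/* seq = (data >> MCDI_EVENT_DBRET_SEQ_LBN) & 0xff: the
		 * command with this sequence is in flight; another command
		 * may now be written to shared memory.
		 */
		break;
	case MCDI_EVENT_CODE_CMDDONE:
		/* completion: the response may now be read back */
		break;
	default:
		break;
	}
}
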
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -662,6 +792,7 @@
/* enum: Fatal. */
#define FCDI_EVENT_LEVEL_FATAL 0x3
#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
@@ -701,6 +832,7 @@
#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
#define FCDI_EVENT_ASSERT_TYPE_LBN 36
@@ -708,12 +840,15 @@
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
@@ -722,6 +857,7 @@
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
/* Index of MC port being referred to */
@@ -729,9 +865,11 @@
#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
/* FC Port index that matches the MC port index in SRC */
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
#define FCDI_EVENT_BOOT_RESULT_LBN 0
@@ -748,14 +886,17 @@
#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
/* Seconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
/* Nanoseconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
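
/* [Editorial sketch, not part of the diff: the (8+8*(num)) length formula
 * means COUNT timestamp records follow an 8-byte header, each with a
 * seconds word at +0 and a nanoseconds word at +4 of the record. MCDI
 * payloads are little-endian; this walk assumes a little-endian host.]
 */
#include <stdint.h>
#include <string.h>

static void fcdi_pps_for_each(const uint8_t *msg, size_t len)
{
	uint32_t count, seconds, nanoseconds;
	size_t i;

	if (len < 8)
		return;
	memcpy(&count, msg + FCDI_EXTENDED_EVENT_PPS_COUNT_OFST, 4);
	for (i = 0; i < count && 8 + 8 * (i + 1) <= len; i++) {
		memcpy(&seconds, msg + 8 + 8 * i, 4);
		memcpy(&nanoseconds, msg + 8 + 8 * i + 4, 4);
		/* consume one timestamp record */
	}
}
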
/* Timestamp records comprising the event */
@@ -783,6 +924,7 @@
/* enum: Fatal. */
#define MUM_EVENT_LEVEL_FATAL 0x3
#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
#define MUM_EVENT_SENSOR_ID_LBN 0
#define MUM_EVENT_SENSOR_ID_WIDTH 8
/* Enum values, see field(s): */
@@ -820,18 +962,23 @@
/* enum: Link fault has been asserted, or has cleared. */
#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
#define MUM_EVENT_SENSOR_DATA_LBN 0
#define MUM_EVENT_SENSOR_DATA_WIDTH 32
#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
@@ -864,7 +1011,9 @@
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
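
/* [Editorial sketch, not part of the diff: the _OFST/_LEN pairs added
 * throughout this file give each field's byte offset and size, so request
 * encoding is a series of fixed-offset little-endian stores. The real
 * driver wraps this in its MCDI_SET_DWORD-style accessors.]
 */
static void mcdi_put_dword(unsigned char *buf, unsigned int ofst,
			   unsigned int value)
{
	buf[ofst + 0] = value & 0xff;
	buf[ofst + 1] = (value >> 8) & 0xff;
	buf[ofst + 2] = (value >> 16) & 0xff;
	buf[ofst + 3] = (value >> 24) & 0xff;
}
/* e.g. for MC_CMD_READ32:
 *	unsigned char inbuf[MC_CMD_READ32_IN_LEN];
 *	mcdi_put_dword(inbuf, MC_CMD_READ32_IN_ADDR_OFST, addr);
 *	mcdi_put_dword(inbuf, MC_CMD_READ32_IN_NUMWORDS_OFST, numwords);
 */
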
/* MC_CMD_READ32_OUT msgresponse */
#define MC_CMD_READ32_OUT_LENMIN 4
@@ -882,13 +1031,14 @@
*/
#define MC_CMD_WRITE32 0x2
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
@@ -915,6 +1065,7 @@
* is a bitfield, with each bit as documented below.
*/
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
@@ -940,9 +1091,12 @@
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
/* enum: Control should return to the caller rather than jumping */
#define MC_CMD_COPYCODE_JUMP_NONE 0x1
@@ -956,12 +1110,13 @@
*/
#define MC_CMD_SET_FUNC 0x4
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
/* MC_CMD_SET_FUNC_OUT msgresponse */
#define MC_CMD_SET_FUNC_OUT_LEN 0
@@ -973,7 +1128,7 @@
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -982,9 +1137,11 @@
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
/* enum: indicates that the MC wasn't flash booted */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
@@ -1007,11 +1164,13 @@
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
/* enum: No assertions have failed. */
#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
/* enum: A system-level assertion has failed. */
@@ -1024,6 +1183,7 @@
#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
@@ -1034,7 +1194,9 @@
#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
/***********************************/
@@ -1050,12 +1212,14 @@
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
#define MC_CMD_LOG_CTRL_OUT_LEN 0
@@ -1076,23 +1240,29 @@
#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
/* placeholder, set to 0 */
#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
/* enum: Reserved version number to indicate "any" version. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
/* enum: Bootrom version value for Siena. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
/* enum: Bootrom version value for Huntington. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1104,9 +1274,11 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1136,41 +1308,54 @@
#define MC_CMD_PTP_OP_ENABLE 0x1
/* enum: Disable PTP packet timestamping operation. */
#define MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
#define MC_CMD_PTP_OP_TRANSMIT 0x3
/* enum: Read the current NIC time. */
#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
#define MC_CMD_PTP_OP_STATUS 0x5
/* enum: Adjust the PTP NIC's time. */
#define MC_CMD_PTP_OP_ADJUST 0x6
/* enum: Synchronize host and NIC time. */
#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
/* enum: Reset some of the PTP related statistics */
#define MC_CMD_PTP_OP_RESET_STATS 0xa
/* enum: Debug operations to MC. */
#define MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAWRITE 0xd
/* enum: Apply an offset to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -1191,7 +1376,7 @@
/* enum: Unsubscribe to stop receiving time events */
#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Set the PTP sync status. Status is used by firmware to report to event
@@ -1204,11 +1389,15 @@
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
/* enum: PTP, version 1 */
#define MC_CMD_PTP_MODE_V1 0x0
/* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -1225,16 +1414,21 @@
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
@@ -1244,17 +1438,30 @@
/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_STATUS msgrequest */
#define MC_CMD_PTP_IN_STATUS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_ADJUST msgrequest */
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -1262,21 +1469,67 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
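
/* [Editorial sketch, not part of the diff: the frequency adjustment is a
 * signed fixed-point ns-per-ns ratio with 40 fractional bits, or 44 when
 * the FP44_FREQ_ADJ capability is set, so parts-per-billion convert as
 * ppb * 2^bits / 1e9. Illustrative arithmetic only.]
 */
static long long ptp_ppb_to_fp(long long ppb, unsigned int bits /* 40 or 44 */)
{
	/* Note |ppb| * 2^44 overflows 64 bits above roughly 2^19 ppb;
	 * production code scales in stages or uses wider arithmetic.
	 */
	return ppb * (1LL << bits) / 1000000000LL;
}
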
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
/* Host address in which to write "synchronization started" indication (64
* bits)
*/
@@ -1288,42 +1541,59 @@
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Reset PTP statistics */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -1332,34 +1602,67 @@
/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of VLAN tags, 0 if not VLAN */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
/* Set of VLAN tags to filter against */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -1368,9 +1671,12 @@
/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable UUID filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
/* UUID to filter against */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -1380,18 +1686,25 @@
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable Domain filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
/* Domain number to filter against */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Set the clock source. */
#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
/* enum: Internal. */
#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
/* enum: External. */
@@ -1400,42 +1713,56 @@
/* MC_CMD_PTP_IN_RST_CLK msgrequest */
#define MC_CMD_PTP_IN_RST_CLK_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Reset value of Timer Reg. */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Enable or disable */
#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
/* enum: Enable */
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function relative queue 0. */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
@@ -1444,29 +1771,39 @@
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Unsubscribe options */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
/* enum: Unsubscribe a single queue */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
/* enum: Unsubscribe all queues */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
/* Event queue ID */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* NIC - Host System Clock Synchronization status */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
/* enum: Host System clock and NIC clock are not in sync */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
/* enum: Host System clock and NIC clock are synchronized */
@@ -1475,8 +1812,11 @@
* no longer in sync.
*/
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1485,12 +1825,16 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
@@ -1502,47 +1846,85 @@
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
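
/* [Editorial sketch, not part of the diff: the V2 response widens the
 * major (seconds) value to 64 bits via the new _MAJOR_HI word, so the
 * full value is (hi << 32) | lo. Assumes a little-endian host.]
 */
#include <stdint.h>
#include <string.h>

static uint64_t ptp_nic_time_v2_major(const uint8_t *outbuf)
{
	uint32_t lo, hi;

	memcpy(&lo, outbuf + MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST, 4);
	memcpy(&hi, outbuf + MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST, 4);
	return ((uint64_t)hi << 32) | lo;
}
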
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
@@ -1555,23 +1937,31 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
/* enum: Successful test */
#define MC_CMD_PTP_MANF_SUCCESS 0x0
/* enum: FPGA load failed */
@@ -1604,15 +1994,19 @@
#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -1628,9 +2022,11 @@
/* Time format required/used by this NIC. Applies to all PTP MCDI
 * operations that pass times between the host and firmware. If this operation
 * is not supported (older firmware), a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it; use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
*/
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
@@ -1646,12 +2042,16 @@
* be assumed.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
/* enum: Major register has units of seconds, minor 2^-27s per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
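
/* [Editorial sketch, not part of the diff: converting the minor timestamp
 * word to nanoseconds follows directly from the tick sizes named in the
 * enum comments; the 16SECONDS_8NANOSECONDS format (minor * 8) is omitted
 * for brevity.]
 */
static unsigned int ptp_minor_to_ns(unsigned int minor, unsigned int fmt)
{
	switch (fmt) {
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
		return minor;			/* 1 ns per tick */
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
		/* 2^-27 s per tick: ns = minor * 10^9 >> 27 */
		return (unsigned int)
			((unsigned long long)minor * 1000000000ULL >> 27);
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
		return minor >> 2;		/* 0.25 ns per tick */
	default:
		return minor;
	}
}
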
/* Minimum acceptable value for a corrected synchronization timeset. When
* comparing host and NIC clock times, the MC returns a set of samples that
* contain the host start and end time, the MC time when the host start was
@@ -1660,46 +2060,66 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
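
/* [Editorial sketch, not part of the diff, restating the rule above: a
 * synchronization sample is usable when its corrected window (host end
 * minus host start, less the time the MC waited for host end) is at
 * least SYNC_WINDOW_MIN. Field names follow MC_CMD_PTP_OUT_SYNCHRONIZE.]
 */
static int ptp_timeset_usable(unsigned int hoststart, unsigned int hostend,
			      unsigned int waitns, unsigned int window_min)
{
	unsigned int window = hostend - hoststart; /* unsigned wrap is safe */

	return window >= waitns && window - waitns >= window_min;
}
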
/* Various PTP capabilities */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
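
/* [Editorial sketch, not part of the diff: drivers should test the new
 * CAPABILITIES bits before relying on them, e.g. selecting the
 * fixed-point precision for frequency adjustments.]
 */
static unsigned int ptp_freq_adj_bits(unsigned int caps)
{
	if (caps & (1u << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
		return MC_CMD_PTP_IN_ADJUST_BITS_FP44;	/* 44 fractional bits */
	return MC_CMD_PTP_IN_ADJUST_BITS;		/* 40 fractional bits */
}
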
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
@@ -1713,14 +2133,17 @@
*/
#define MC_CMD_CSR_READ32 0xc
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_CSR_READ32_OUT msgresponse */
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
@@ -1739,7 +2162,7 @@
*/
#define MC_CMD_CSR_WRITE32 0xd
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
@@ -1747,7 +2170,9 @@
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
@@ -1756,6 +2181,7 @@
/* MC_CMD_CSR_WRITE32_OUT msgresponse */
#define MC_CMD_CSR_WRITE32_OUT_LEN 4
#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -1776,6 +2202,7 @@
* sensors.
*/
#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
/* enum: OCSD (Option Card Sensor Data) sub-command. */
#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
/* enum: Last known valid HP sub-command. */
@@ -1790,10 +2217,12 @@
* NULL.)
*/
#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
/* MC_CMD_HP_OUT msgresponse */
#define MC_CMD_HP_OUT_LEN 4
#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
/* enum: OCSD stopped for this card. */
#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
/* enum: OCSD was successfully started with the address provided. */
@@ -1838,29 +2267,35 @@
* external devices.
*/
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
/* enum: Internal. */
#define MC_CMD_MDIO_BUS_INTERNAL 0x0
/* enum: External. */
#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations use clause45 mode. If you
* want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
#define MC_CMD_MDIO_CLAUSE22 0x20
/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
/* Status: the MDIO commands return the raw status bits from the MDIO block. A
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
/* enum: Good. */
#define MC_CMD_MDIO_STATUS_GOOD 0x8
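
/* [Editorial sketch, not part of the diff: a "good" MDIO transaction
 * returns the raw status with only the DONE bit set, so callers compare
 * the STATUS word against MC_CMD_MDIO_STATUS_GOOD before trusting VALUE.]
 */
static int mdio_read_ok(unsigned int status)
{
	return status == MC_CMD_MDIO_STATUS_GOOD;
}
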
@@ -1879,22 +2314,27 @@
* external devices.
*/
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations use clause45 mode. If you
* want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
/* MC_CMD_MDIO_CLAUSE22 0x20 */
/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
@@ -1902,6 +2342,7 @@
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -1912,7 +2353,7 @@
*/
#define MC_CMD_DBI_WRITE 0x12
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
@@ -1932,9 +2373,11 @@
/* MC_CMD_DBIWROP_TYPEDEF structuredef */
#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -1944,6 +2387,7 @@
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
@@ -1959,13 +2403,16 @@
#define MC_CMD_PORT_READ32_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
/***********************************/
@@ -1979,13 +2426,16 @@
#define MC_CMD_PORT_WRITE32_IN_LEN 8
/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -1999,6 +2449,7 @@
#define MC_CMD_PORT_READ128_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
@@ -2007,6 +2458,7 @@
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
/***********************************/
@@ -2020,6 +2472,7 @@
#define MC_CMD_PORT_WRITE128_IN_LEN 20
/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
@@ -2028,6 +2481,7 @@
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
/* MC_CMD_CAPABILITIES structuredef */
#define MC_CMD_CAPABILITIES_LEN 4
@@ -2072,24 +2526,54 @@
#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
*/
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
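
MC_CMD_GET_BOARD_CFG_OUT_LEN(num) encodes a variable-length response: 72 fixed bytes followed by num 16-bit subtype entries. A hedged sketch of recovering the entry count from the actual response length (get_le16 is another stand-in helper):

#include <stddef.h>
#include <stdint.h>

static uint16_t get_le16(const uint8_t *buf, unsigned int ofst)
{
	return (uint16_t)(buf[ofst] | (buf[ofst + 1] << 8));
}

/* Walk the FW_SUBTYPE_LIST of a GET_BOARD_CFG response of resp_len bytes. */
static void walk_fw_subtypes(const uint8_t *resp, size_t resp_len)
{
	size_t i, num;

	if (resp_len < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST)
		return;
	/* Invert OUT_LEN(num) = 72 + 2*num. */
	num = (resp_len - MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST) /
	      MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN;
	for (i = 0; i < num; i++) {
		uint16_t subtype = get_le16(resp,
			MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST + 2 * i);
		(void)subtype; /* drivers match this against update images */
	}
}
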
@@ -2103,7 +2587,7 @@
*/
#define MC_CMD_DBI_READX 0x19
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
@@ -2130,9 +2614,11 @@
/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -2149,7 +2635,7 @@
*/
#define MC_CMD_SET_RAND_SEED 0x1a
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -2198,14 +2684,17 @@
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
#define MC_CMD_DRV_ATTACH_LBN 0
#define MC_CMD_DRV_ATTACH_WIDTH 1
#define MC_CMD_DRV_PREBOOT_LBN 1
#define MC_CMD_DRV_PREBOOT_WIDTH 1
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
/* enum: Prefer to use full featured firmware */
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
@@ -2229,13 +2718,16 @@
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
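
Because UPDATE selects between setting and merely reporting, the attach state can be queried without side effects by sending UPDATE=0. A hedged sketch (put_le32/get_le32 from the MDIO example above; the transport call is elided, so resp is assumed to have been filled in by the exchange):

#include <stdint.h>

static uint32_t query_attach_state(void)
{
	uint8_t req[MC_CMD_DRV_ATTACH_IN_LEN] = { 0 };
	uint8_t resp[MC_CMD_DRV_ATTACH_OUT_LEN] = { 0 };

	put_le32(req, MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST, 0); /* ignored */
	put_le32(req, MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, 0);    /* report only */
	put_le32(req, MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST,
		 MC_CMD_FW_FULL_FEATURED);
	/* ... issue MC_CMD_DRV_ATTACH over the MCDI transport here ... */
	return get_le32(resp, MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST);
}
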
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -2260,6 +2752,7 @@
#define MC_CMD_SHMUART_IN_LEN 4
/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
/* MC_CMD_SHMUART_OUT msgresponse */
#define MC_CMD_SHMUART_OUT_LEN 0
@@ -2297,6 +2790,7 @@
* (TBD).
*/
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -2314,8 +2808,10 @@
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
@@ -2346,31 +2842,54 @@
/* MC_CMD_RXD_MONITOR_IN msgrequest */
#define MC_CMD_RXD_MONITOR_IN_LEN 12
#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
/* MC_CMD_RXD_MONITOR_OUT msgresponse */
#define MC_CMD_RXD_MONITOR_OUT_LEN 80
#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
/***********************************/
@@ -2379,13 +2898,14 @@
*/
#define MC_CMD_PUTS 0x23
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
#define MC_CMD_PUTS_IN_UART_LBN 0
#define MC_CMD_PUTS_IN_UART_WIDTH 1
#define MC_CMD_PUTS_IN_PORT_LBN 1
@@ -2417,6 +2937,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
@@ -2433,8 +2954,10 @@
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
#define MC_CMD_PHY_CAP_10FDX_LBN 2
@@ -2459,17 +2982,39 @@
#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
#define MC_CMD_PHY_CAP_DDM_LBN 12
#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
/* enum: Xaui. */
#define MC_CMD_MEDIA_XAUI 0x1
/* enum: CX4. */
@@ -2485,6 +3030,7 @@
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -2515,6 +3061,7 @@
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
/* enum: Run the PHY's short cable BIST. */
#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
/* enum: Run the PHY's long cable BIST. */
@@ -2556,6 +3103,7 @@
#define MC_CMD_POLL_BIST_OUT_LEN 8
/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
/* enum: Running. */
#define MC_CMD_POLL_BIST_RUNNING 0x1
/* enum: Passed. */
@@ -2565,19 +3113,26 @@
/* enum: Timed-out. */
#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
/* enum: Ok. */
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
/* enum: Open. */
@@ -2590,14 +3145,17 @@
#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
@@ -2605,9 +3163,11 @@
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
/* enum: Complete. */
#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
/* enum: Bus switch off I2C write. */
@@ -2631,9 +3191,11 @@
#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
/* enum: Test has completed. */
#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
/* enum: RAM test - walk ones. */
@@ -2650,8 +3212,10 @@
#define MC_CMD_POLL_BIST_MEM_ECC 0x6
/* Failure address, only valid if result is POLL_BIST_FAILED */
#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
/* Bus or address space to which the failure address corresponds */
#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
/* enum: MC MIPS bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
/* enum: CSR IREG bus. */
@@ -2672,14 +3236,19 @@
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
/* Pattern written to RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
/* Actual value read from RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
/* ECC error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
/* ECC parity error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
/* ECC fatal error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
/***********************************/
@@ -2831,6 +3400,143 @@
/* Enum values, see field(s): */
/* 100M */
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
+
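Each 64-bit mask in this response is described twice: once as an 8-byte _OFST/_LEN field and once as _LO_OFST/_HI_OFST dword halves. A hedged sketch of reassembling the 25G mask and testing one MC_CMD_LOOPBACK_* mode bit (get_le32 as before):

#include <stdint.h>

static int loopback_supported_25g(const uint8_t *resp, unsigned int mode)
{
	uint64_t mask;

	mask = (uint64_t)get_le32(resp,
			MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST) |
	       ((uint64_t)get_le32(resp,
			MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST) << 32);
	return (int)((mask >> mode) & 1); /* mode: an MC_CMD_LOOPBACK_* value */
}
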
/***********************************/
/* MC_CMD_GET_LINK
@@ -2848,17 +3554,22 @@
#define MC_CMD_GET_LINK_OUT_LEN 28
/* near-side advertised capabilities */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
/* link-partner advertised capabilities */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
/* Autonegotiated speed in mbit/s. The link may still be down even if this
* reads non-zero.
*/
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
@@ -2873,9 +3584,11 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
@@ -2899,8 +3612,10 @@
#define MC_CMD_SET_LINK_IN_LEN 16
/* ??? */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
@@ -2909,12 +3624,14 @@
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
/* A loopback speed of "0" is supported, and means (choose any available
* speed).
*/
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -2932,6 +3649,7 @@
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
#define MC_CMD_LED_OFF 0x0 /* enum */
#define MC_CMD_LED_ON 0x1 /* enum */
#define MC_CMD_LED_DEFAULT 0x2 /* enum */
@@ -2954,17 +3672,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
@@ -2978,6 +3700,7 @@
/* enum: Issue flow control. */
#define MC_CMD_FCNTL_GENERATE 0x5
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
@@ -2987,17 +3710,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
@@ -3011,6 +3738,7 @@
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* Select which parameters to configure. A parameter will only be modified if
@@ -3019,6 +3747,7 @@
* set).
*/
#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
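
For instance, to change only the MTU a caller sets just the CFG_MTU bit of CONTROL and can leave every other field zero. A hedged sketch (put_le32 as before; MC_CMD_SET_MAC_EXT_IN_LEN is the request length defined with this message, outside this hunk):

#include <stdint.h>
#include <string.h>

static void build_set_mtu_only(uint8_t *req, uint32_t mtu)
{
	memset(req, 0, MC_CMD_SET_MAC_EXT_IN_LEN);
	put_le32(req, MC_CMD_SET_MAC_EXT_IN_MTU_OFST, mtu);
	/* Only fields whose CONTROL bit is set are applied by firmware. */
	put_le32(req, MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST,
		 1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN);
}
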
@@ -3040,6 +3769,7 @@
* to 0.
*/
#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
/***********************************/
@@ -3144,6 +3874,7 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
@@ -3158,9 +3889,16 @@
#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t).
+ */
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
/* port id so vadapter stats can be provided */
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
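
A hedged sketch of the DMA_LEN rule above: prefer the stats count reported via GET_CAPABILITIES_V4 and fall back to the legacy constant. num_stats_from_caps stands in for however the caller obtained MAC_STATS_NUM_STATS (zero meaning the firmware did not report it):

#include <stdint.h>

static uint32_t mac_stats_dma_len(uint32_t num_stats_from_caps)
{
	if (num_stats_from_caps != 0)
		return num_stats_from_caps * (uint32_t)sizeof(uint64_t);
	return MC_CMD_MAC_NSTATS * (uint32_t)sizeof(uint64_t); /* legacy */
}
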
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -3305,9 +4043,126 @@
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_END 0x5f
-#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
#define MC_CMD_MAC_NSTATS 0x61 /* enum */
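
The GENERATION_START/GENERATION_END pair lets the host detect a torn read of the DMAd statistics: sample GENERATION_END, copy the buffer, then compare against GENERATION_START (entry 0, defined with the other stats enums earlier in this header) and retry on mismatch. A hedged sketch for the legacy layout, reusing get_le32 from the earlier sketches; real drivers also need DMA read barriers between the three steps:

#include <stdint.h>
#include <string.h>

static uint64_t get_le64(const uint8_t *buf, unsigned int ofst)
{
	return (uint64_t)get_le32(buf, ofst) |
	       ((uint64_t)get_le32(buf, ofst + 4) << 32);
}

/* Copy a consistent snapshot out of the legacy-layout stats DMA buffer. */
static int read_mac_stats_snapshot(const uint8_t *dma, uint64_t *snap)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		uint64_t gen_end = get_le64(dma,
			MC_CMD_MAC_GENERATION_END * sizeof(uint64_t));
		/* DMA read barrier here on real hardware */
		memcpy(snap, dma, MC_CMD_MAC_NSTATS * sizeof(uint64_t));
		/* ... and here */
		if (get_le64(dma, MC_CMD_MAC_GENERATION_START *
				  sizeof(uint64_t)) == gen_end)
			return 0; /* generations agree: snapshot is stable */
	}
	return -1; /* buffer kept changing underneath us */
}
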
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103, which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120, which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
/***********************************/
/* MC_CMD_SRIOV
@@ -3318,21 +4173,28 @@
/* MC_CMD_SRIOV_IN msgrequest */
#define MC_CMD_SRIOV_IN_LEN 12
#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
/* MC_CMD_SRIOV_OUT msgresponse */
#define MC_CMD_SRIOV_OUT_LEN 8
#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
@@ -3342,6 +4204,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
@@ -3352,6 +4215,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
@@ -3403,10 +4267,12 @@
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
/* enum: Magic */
#define MC_CMD_WOL_TYPE_MAGIC 0x0
/* enum: MS Windows Magic */
@@ -3428,7 +4294,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
@@ -3437,9 +4305,13 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
@@ -3448,7 +4320,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
@@ -3461,7 +4335,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
@@ -3476,8 +4352,11 @@
/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
@@ -3486,6 +4365,7 @@
/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -3499,6 +4379,7 @@
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
@@ -3516,6 +4397,7 @@
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -3556,6 +4438,7 @@
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -3612,47 +4495,65 @@
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_INFO_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_OUT_LEN 24
#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
*/
#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
/***********************************/
@@ -3670,6 +4571,7 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
@@ -3680,9 +4582,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -3703,20 +4607,26 @@
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
/* Optional control info. If a partition is stored with an A/B versioning
* scheme (i.e. in more than one physical partition in NVRAM), the host can set
* this to control which underlying physical partition is used to read data
@@ -3726,6 +4636,7 @@
* verifying by reading with MODE=TARGET_BACKUP.
*/
#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
/* enum: Same as omitting MODE: caller sees data in current partition unless it
* holds the write lock, in which case it sees data in the partition it is
* updating.
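
A hedged sketch of the verify-after-write pattern just described: while still holding the update lock, the host reads back through the backup partition by setting MODE (put_le32 as before). The mode constants, e.g. a TARGET_BACKUP value, are defined just past this hunk, so the value is passed in here rather than named:

#include <stdint.h>

/* Build a V2 read targeting a specific partition instance via MODE. */
static void build_nvram_read_v2(uint8_t *req, uint32_t type, uint32_t offset,
				uint32_t length, uint32_t mode)
{
	put_le32(req, MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST, type);
	put_le32(req, MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST, offset);
	put_le32(req, MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST, length);
	put_le32(req, MC_CMD_NVRAM_READ_IN_V2_MODE_OFST, mode);
}
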
@@ -3765,10 +4676,13 @@
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
@@ -3791,10 +4705,13 @@
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
@@ -3815,9 +4732,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
* request with additional flags indicating version of NVRAM_UPDATE commands in
@@ -3826,10 +4745,13 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -3848,16 +4770,19 @@
* This process takes a few seconds to complete, so it is likely to take more
* than the MCDI timeout. Hence signature verification is initiated when
* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware; however, the
- * MCDI command returns immediately with error code EAGAIN. Subsequent
- * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
- * in progress. Once the verification has completed, this response payload
- * includes the results of the signature verification. Note that the nvram lock
- * in firmware is only released after the verification has completed and the
- * host has read back the result code from firmware.
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
/* Result of nvram update completion processing */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
/* enum: Verify succeeded without any errors. */
#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
/* enum: CMS format verification failed due to an internal error. */
@@ -3884,6 +4809,12 @@
* Trusted approver's list.
*/
#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
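
A hedged sketch of consuming the verification result: firmware that predates the V2 response returns a shorter payload, which a caller can map onto the RC_UNKNOWN value defined above (get_le32 as before):

#include <stddef.h>
#include <stdint.h>

/* Returns one of the MC_CMD_NVRAM_VERIFY_RC_* values. */
static uint32_t nvram_update_finish_result(const uint8_t *resp,
					   size_t resp_len)
{
	if (resp_len < MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)
		return MC_CMD_NVRAM_VERIFY_RC_UNKNOWN; /* legacy firmware */
	return get_le32(resp,
			MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST);
}
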
/***********************************/
@@ -3911,6 +4842,7 @@
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
/* MC_CMD_REBOOT_OUT msgresponse */
@@ -3947,11 +4879,12 @@
*/
#define MC_CMD_REBOOT_MODE 0x3f
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
/* enum: Normal. */
#define MC_CMD_REBOOT_MODE_NORMAL 0x0
/* enum: Power-on Reset. */
@@ -3966,6 +4899,7 @@
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
/***********************************/
@@ -4001,7 +4935,7 @@
*/
#define MC_CMD_SENSOR_INFO 0x41
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -4015,12 +4949,14 @@
* Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
*/
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
/* enum: Controller temperature: degC */
#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
/* enum: Phy common temperature: degC */
@@ -4183,6 +5119,20 @@
#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
/* enum: Board temperature (back): degC */
#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4196,6 +5146,7 @@
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO_OUT */
#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
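
Bit 31 of MASK (NEXT_PAGE) drives the paging scheme described above, so enumerating all sensors is a loop that keeps requesting the next PAGE while that bit is set. A hedged sketch; issue_sensor_info_ext stands in for the actual MCDI exchange returning the response MASK:

#include <stdint.h>

/* Hypothetical wrapper: send MC_CMD_SENSOR_INFO_EXT for 'page' and return
 * the 32-bit MASK from the response.
 */
uint32_t issue_sensor_info_ext(uint32_t page);

static void enumerate_sensor_pages(void)
{
	uint32_t page = 0, mask;

	do {
		mask = issue_sensor_info_ext(page);
		/* Bits 0..30 cover sensors 32*page .. 32*page+30; bit 31 is
		 * the NEXT_PAGE continuation flag, not a sensor.
		 */
		page++;
	} while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
}
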
@@ -4247,7 +5198,7 @@
*/
#define MC_CMD_READ_SENSORS 0x42
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
@@ -4266,6 +5217,7 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
@@ -4319,6 +5271,7 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
/* enum: Ok. */
#define MC_CMD_PHY_STATE_OK 0x1
/* enum: Faulty. */
@@ -4355,6 +5308,7 @@
/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -4371,6 +5325,7 @@
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
@@ -4381,13 +5336,16 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
@@ -4398,6 +5356,7 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -4412,7 +5371,9 @@
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
@@ -4451,6 +5412,7 @@
#define MC_CMD_TESTASSERT_V2_IN_LEN 4
/* How to provoke the assertion */
#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
* you're testing firmware, this is what you want.
*/
@@ -4486,6 +5448,7 @@
#define MC_CMD_WORKAROUND_IN_LEN 8
/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -4514,6 +5477,7 @@
* the workaround
*/
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
@@ -4523,6 +5487,7 @@
*/
#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
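/* [Editorial sketch] _LBN/_WIDTH pairs name sub-fields of a dword by
 * least-significant bit number and bit width; the driver reads them with
 * its EFX_*_FIELD() macros.  A generic standalone equivalent:
 */
#include <stdint.h>

static inline uint32_t mcdi_field(uint32_t dword, unsigned int lbn,
				  unsigned int width)
{
	uint32_t mask = (width == 32) ? 0xffffffffu : (1u << width) - 1;

	return (dword >> lbn) & mask;
}

/* e.g.: mcdi_field(flags, MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN,
 *                  MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH) */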
@@ -4543,6 +5508,7 @@
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -4550,6 +5516,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
@@ -4568,12 +5535,14 @@
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
/* enum: Passed. */
#define MC_CMD_NVRAM_TEST_PASS 0x0
/* enum: Failed. */
@@ -4594,12 +5563,16 @@
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
/* 0-8 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
@@ -4608,10 +5581,13 @@
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
/* enum: Out. */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
/* enum: In. */
@@ -4626,21 +5602,26 @@
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
@@ -4657,9 +5638,13 @@
/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
/***********************************/
@@ -4680,6 +5665,7 @@
#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
/* total number of partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
/* type ID code for each of NUM_PARTITIONS partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
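/* [Editorial sketch] Variable-length responses publish LENMIN/LENMAX and a
 * LEN(num) formula; callers invert it to recover the element count.  Here
 * LEN(num) = 4 + 4*num, so for a response of outlen bytes:
 */
#include <stddef.h>

static inline unsigned int nvram_partitions_count(size_t outlen)
{
	/* assumes outlen was already checked against the message's LENMIN */
	return (outlen - MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST) /
	       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN;
}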
@@ -4700,6 +5686,7 @@
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
@@ -4707,7 +5694,9 @@
#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
@@ -4716,6 +5705,7 @@
#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
/* Subtype ID code for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
/* 1st component of W.X.Y.Z version number for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
@@ -4756,8 +5746,10 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
/* Number of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
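/* [Editorial sketch] The allocated block of MAC_COUNT addresses starts at
 * the 6-byte base address returned earlier in this response and advances
 * by MAC_STRIDE per address; treating the MAC as a big-endian integer:
 */
#include <stdint.h>

static void mac_addr_nth(const uint8_t base[6], uint32_t stride,
			 uint32_t n, uint8_t out[6])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 6; i++)
		v = (v << 8) | base[i];
	v += (uint64_t)stride * n;	/* nth address in the block */
	for (i = 5; i >= 0; i--) {
		out[i] = v & 0xff;
		v >>= 8;
	}
}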
/***********************************/
@@ -4772,6 +5764,7 @@
#define MC_CMD_CLP_IN_LEN 4
/* Sub operation */
#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
/* enum: Return to factory default settings */
#define MC_CMD_CLP_OP_DEFAULT 0x1
/* enum: Set MAC address */
@@ -4789,6 +5782,7 @@
/* MC_CMD_CLP_IN_DEFAULT msgrequest */
#define MC_CMD_CLP_IN_DEFAULT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
@@ -4796,6 +5790,7 @@
/* MC_CMD_CLP_IN_SET_MAC msgrequest */
#define MC_CMD_CLP_IN_SET_MAC_LEN 12
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MAC address assigned to port */
#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
@@ -4809,6 +5804,7 @@
/* MC_CMD_CLP_IN_GET_MAC msgrequest */
#define MC_CMD_CLP_IN_GET_MAC_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
@@ -4822,6 +5818,7 @@
/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* Boot flag */
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
@@ -4832,6 +5829,7 @@
/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
@@ -4849,11 +5847,12 @@
*/
#define MC_CMD_MUM 0x57
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_MUM_IN msgrequest */
#define MC_CMD_MUM_IN_LEN 4
#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_OP_LBN 0
#define MC_CMD_MUM_IN_OP_WIDTH 8
/* enum: NULL MCDI command to MUM */
@@ -4893,26 +5892,32 @@
#define MC_CMD_MUM_IN_NULL_LEN 4
/* MUM cmd header */
#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_READ msgrequest */
#define MC_CMD_MUM_IN_READ_LEN 16
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to the MUM) whose registers are to be read */
#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE 0x1
/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
/* 32-bit address to read from */
#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
/* Number of words to read. */
#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
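/* [Editorial sketch] A MUM register read is the 32-bit op header followed
 * by device, address and word count at the offsets above, built here with
 * the mcdi_put_dword() helper sketched earlier; op_read is the relevant
 * opcode from the MC_CMD_MUM_IN_OP enum range defined above (exact enum
 * name not shown in this hunk, so it is passed in):
 */
static void build_mum_read(uint8_t *req, uint32_t op_read,
			   uint32_t addr, uint32_t numwords)
{
	mcdi_put_dword(req, MC_CMD_MUM_IN_CMD_OFST, op_read);
	mcdi_put_dword(req, MC_CMD_MUM_IN_READ_DEVICE_OFST,
		       MC_CMD_MUM_DEV_HITTITE);
	mcdi_put_dword(req, MC_CMD_MUM_IN_READ_ADDR_OFST, addr);
	mcdi_put_dword(req, MC_CMD_MUM_IN_READ_NUMWORDS_OFST, numwords);
}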
/* MC_CMD_MUM_IN_WRITE msgrequest */
#define MC_CMD_MUM_IN_WRITE_LENMIN 16
@@ -4920,12 +5925,15 @@
#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to the MUM) whose registers are to be written */
#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
/* MC_CMD_MUM_DEV_HITTITE 0x1 */
/* 32-bit address to write to */
#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
/* Words to write */
#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
@@ -4938,12 +5946,16 @@
#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MUM I2C cmd code */
#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
/* Number of bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
/* Number of bytes to read */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
/* Bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
@@ -4954,21 +5966,28 @@
#define MC_CMD_MUM_IN_LOG_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
/* Enable/disable debug output to UART */
#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
/* MC_CMD_MUM_IN_GPIO msgrequest */
#define MC_CMD_MUM_IN_GPIO_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
@@ -4981,40 +6000,56 @@
/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
@@ -5027,26 +6062,34 @@
/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
@@ -5054,7 +6097,9 @@
#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
@@ -5064,13 +6109,16 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Bit-mask of clocks to be programmed */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
/* Control flags for clock programming */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
@@ -5082,19 +6130,24 @@
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Enable/Disable FPGA config from flash */
#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_QSFP msgrequest */
#define MC_CMD_MUM_IN_QSFP_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
@@ -5104,52 +6157,77 @@
#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -5160,6 +6238,7 @@
/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
@@ -5197,8 +6276,10 @@
#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
/* The first 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
@@ -5207,8 +6288,10 @@
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
/* The first 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
@@ -5216,11 +6299,14 @@
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
@@ -5249,6 +6335,7 @@
/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
@@ -5256,6 +6343,7 @@
/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
@@ -5263,7 +6351,9 @@
/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
@@ -5272,6 +6362,7 @@
/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
@@ -5279,6 +6370,7 @@
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
@@ -5287,11 +6379,14 @@
/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
@@ -5299,12 +6394,14 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
/* Discrete (soldered) DDR resistor strap info */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
/* Number of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
/* Array of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
@@ -5365,6 +6462,7 @@
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
/* enum: An invalid port handle. */
#define EVB_PORT_ID_NULL 0x0
/* enum: The port assigned to this function. */
@@ -5460,6 +6558,10 @@
#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
/* enum: MUM firmware partition */
#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
/* enum: MUM Application table partition. */
@@ -5474,8 +6576,8 @@
#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: UEFI expansion ROM if separate from PXE */
#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
-/* enum: Spare partition 0 */
-#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
@@ -5488,6 +6590,27 @@
#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5502,6 +6625,7 @@
/* LICENSED_APP_ID structuredef */
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
/* enum: OpenOnload */
#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
@@ -5526,6 +6650,14 @@
#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
/* enum: Capture SolarSystem 40G */
#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
@@ -5590,6 +6722,14 @@
#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
#define LICENSED_V3_APPS_MASK_LBN 0
#define LICENSED_V3_APPS_MASK_WIDTH 64
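/* [Editorial sketch] LICENSED_V3_APPS is a 64-bit mask: bit _LBN set means
 * that application is licensed, so the four new apps above are simply new
 * bit positions.  Standalone test:
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool v3_app_licensed(uint64_t apps_mask, unsigned int app_lbn)
{
	return (apps_mask >> app_lbn) & 1;
}

/* e.g.: v3_app_licensed(mask, LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN) */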
@@ -5636,6 +6776,18 @@
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
/* enum: This is a TX completion event, not a timestamp */
#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
/* enum: This is the low part of a TX timestamp event */
#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
/* enum: This is the high part of a TX timestamp event */
@@ -5669,6 +6821,19 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
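/* [Editorial sketch] The new CTPIO_STATS_MAP packs two 16-bit fields into
 * one little-endian dword per the OFST/LBN values above:
 */
#include <stdint.h>

static inline uint32_t ctpio_stats_map(uint16_t vi, uint16_t bucket)
{
	return ((uint32_t)vi << CTPIO_STATS_MAP_VI_LBN) |
	       ((uint32_t)bucket << CTPIO_STATS_MAP_BUCKET_LBN);
}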
/***********************************/
/* MC_CMD_READ_REGS
@@ -5676,7 +6841,7 @@
*/
#define MC_CMD_READ_REGS 0x50
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -5709,17 +6874,22 @@
#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
@@ -5735,6 +6905,7 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -5745,13 +6916,16 @@
#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -5762,6 +6936,7 @@
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
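/* [Editorial sketch] INIT_EVQ carries one 64-bit DMA address per 4k page
 * of queue memory, so the request size is LEN(n) = 36 + 8*n and must fall
 * within the message's minimum/maximum lengths (the LENMIN/LENMAX names
 * are assumed from the header's usual pattern):
 */
#include <stddef.h>

static inline size_t init_evq_req_len(unsigned int n_pages)
{
	size_t len = MC_CMD_INIT_EVQ_IN_LEN(n_pages);

	return (len >= MC_CMD_INIT_EVQ_IN_LENMIN &&
		len <= MC_CMD_INIT_EVQ_IN_LENMAX) ? len : 0;
}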
@@ -5774,6 +6949,7 @@
#define MC_CMD_INIT_EVQ_OUT_LEN 4
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
@@ -5781,17 +6957,22 @@
#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
@@ -5828,6 +7009,7 @@
*/
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -5838,13 +7020,16 @@
#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -5855,6 +7040,7 @@
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
@@ -5867,8 +7053,10 @@
#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
/* Actual configuration applied on the card */
#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
@@ -5916,17 +7104,22 @@
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
@@ -5945,8 +7138,10 @@
#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
@@ -5961,17 +7156,22 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
@@ -6007,8 +7207,10 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6017,6 +7219,7 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
@@ -6040,18 +7243,23 @@
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6072,8 +7280,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
@@ -6088,18 +7298,23 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6122,10 +7337,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
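/* [Editorial sketch] Requesting CTPIO together with the new CTPIO_UTHRESH
 * flag in the INIT_TXQ_EXT flags dword:
 */
#include <stdint.h>

uint32_t txq_flags = (1u << MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN) |
		     (1u << MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN);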
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6135,6 +7354,7 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
/* Flags related to Qbb flow control mode. */
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
@@ -6161,6 +7381,7 @@
* passed to INIT_EVQ
*/
#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_EVQ_OUT msgresponse */
#define MC_CMD_FINI_EVQ_OUT_LEN 0
@@ -6178,6 +7399,7 @@
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_RXQ_OUT msgresponse */
#define MC_CMD_FINI_RXQ_OUT_LEN 0
@@ -6195,6 +7417,7 @@
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_TXQ_OUT msgresponse */
#define MC_CMD_FINI_TXQ_OUT_LEN 0
@@ -6212,6 +7435,7 @@
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
/* Bits 0 - 63 of event */
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
@@ -6237,6 +7461,7 @@
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
@@ -6252,6 +7477,7 @@
#define MC_PROXY_STATUS_BUFFER_LEN 16
/* Handle allocated by the firmware for this proxy transaction */
#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
/* enum: An invalid handle. */
#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
@@ -6282,6 +7508,7 @@
* elevated privilege mask granted to the requesting function.
*/
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
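/* [Editorial sketch] In a structuredef, _LBN counts bits from the start of
 * the whole structure, so byte-aligned fields satisfy OFST == LBN / 8
 * (here 12 == 96 / 8); checkable at compile time:
 */
_Static_assert(MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST ==
	       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN / 8,
	       "structuredef LBN/OFST convention");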
@@ -6298,6 +7525,7 @@
/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6309,6 +7537,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size REPLY_BLOCK_SIZE.
*/
@@ -6318,6 +7547,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6328,8 +7558,10 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
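/* [Editorial sketch] The STATUS/REQUEST block sizes above must be powers
 * of two, and REPLY may additionally be zero; the usual check:
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool proxy_block_size_ok(uint32_t sz, bool may_be_zero)
{
	if (sz == 0)
		return may_be_zero;
	return (sz & (sz - 1)) == 0;	/* power of two iff one bit set */
}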
@@ -6337,6 +7569,7 @@
/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6348,6 +7581,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size REPLY_BLOCK_SIZE.
*/
@@ -6357,6 +7591,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6367,12 +7602,15 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -6392,7 +7630,9 @@
/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
* is stored in the REPLY_BUFF.
*/
@@ -6408,6 +7648,7 @@
*/
#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
@@ -6427,17 +7668,22 @@
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
/* Size of buffer table pages to use, in bytes (note that only a few values are
* legal on any specific hardware).
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
/* Buffer table IDs for use in DMA descriptors. */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
/***********************************/
@@ -6453,10 +7699,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
/* ID */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Num entries */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* Buffer table entry address */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
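/* [Editorial sketch] LENMAX 268 caps one call at (268 - 12) / 8 = 32
 * buffer-table entries, so programming a large chunk takes several calls;
 * a caller might size each batch like this:
 */
static inline unsigned int buftbl_batch(unsigned int remaining)
{
	unsigned int max_batch =
		(MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX -
		 MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST) /
		MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN;

	return remaining < max_batch ? remaining : max_batch;
}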
@@ -6479,48 +7728,11 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-/* PORT_CONFIG_ENTRY structuredef */
-#define PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define PORT_CONFIG_ENTRY_CORE_OFST 1
-#define PORT_CONFIG_ENTRY_CORE_LEN 1
-#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
-#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
-#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
-#define PORT_CONFIG_ENTRY_CORE_LBN 8
-#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define PORT_CONFIG_ENTRY_LANES_OFST 4
-#define PORT_CONFIG_ENTRY_LANES_LBN 32
-#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -6534,6 +7746,7 @@
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
/* enum: single-recipient filter insert */
#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
/* enum: single-recipient filter remove */
@@ -6554,8 +7767,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
@@ -6586,6 +7801,7 @@
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
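/* [Editorial sketch] MATCH_FIELDS is a bitmask built from the 1-bit flags
 * above, e.g. a filter matching on both source and destination IP:
 */
#include <stdint.h>

uint32_t match_fields = (1u << MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN) |
			(1u << MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN);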
/* receive destination */
#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
/* enum: drop packets */
#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
@@ -6598,8 +7814,10 @@
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -6614,13 +7832,16 @@
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
@@ -6653,8 +7874,10 @@
#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
/* Firmware defined register 1 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
/* source IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -6673,6 +7896,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* filter handle (for remove / unsubscribe operations) */
@@ -6683,8 +7907,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
@@ -6743,6 +7969,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
/* enum: drop packets */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
@@ -6755,8 +7982,10 @@
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -6771,13 +8000,16 @@
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
@@ -6810,11 +8042,13 @@
#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
* protocol is GRE) to match (as bytes in network order; set last byte to 0 for
* VXLAN/NVGRE, or 1 for Geneve)
*/
#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
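/* Illustrative sketch (not part of this header): packing the 32-bit
 * VNI_OR_VSID word per the layout above, assuming the TYPE field starts
 * at bit 24 as its LBN states (type 0 for VXLAN/NVGRE, 1 for Geneve,
 * per the field comment).
 */
static inline unsigned int pack_vni_or_vsid(unsigned int vni24,
					    unsigned int type)
{
	return (vni24 & 0xffffffu) | ((type & 0xffu) << 24);
}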
@@ -6880,10 +8114,12 @@
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
* order; set last 12 bytes to 0 for IPv4 address)
*/
@@ -6899,6 +8135,7 @@
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6918,6 +8155,7 @@
#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_EXT_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6944,6 +8182,7 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
/* enum: read the list of supported RX filter matches */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
/* enum: read flags indicating restrictions on filter insertion for the calling
@@ -6966,10 +8205,12 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* number of supported match types */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
/* array of supported match types (valid MATCH_FIELDS values for
* MC_CMD_FILTER_OP) sorted in decreasing priority order
*/
@@ -6982,10 +8223,12 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* bitfield of filter insertion restrictions */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
@@ -7005,11 +8248,16 @@
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
/* enum: RX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format) */
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
@@ -7021,26 +8269,33 @@
#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry. */
#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
+/* enum: Write a word of DICPU DMEM or a LUE entry. */
#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */
#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
/* selector (for MISC_STATE target) */
#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
/* enum: Port to datapath mapping */
#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
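/* Illustrative sketch (not part of this header): a DICPU DMEM
 * read-modify-write combines the AND mask and XOR value exactly as the
 * field comments above state: new = (old & mask) ^ value.
 */
static inline unsigned int dmem_rmw(unsigned int old_word,
				    unsigned int and_mask,
				    unsigned int xor_value)
{
	return (old_word & and_mask) ^ xor_value;
}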
/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
@@ -7049,6 +8304,7 @@
#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
/* value read (for DMEM reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
/* value read (for LUE reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
@@ -7093,6 +8349,7 @@
#define MC_CMD_SET_PF_COUNT_IN_LEN 4
/* New number of PFs on the device. */
#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
@@ -7113,6 +8370,7 @@
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
/***********************************/
@@ -7127,6 +8385,7 @@
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
@@ -7144,8 +8403,10 @@
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
* Use extended version in new code.
@@ -7153,21 +8414,26 @@
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -7201,15 +8467,20 @@
#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
/* Max number of VFs before sriov stride and offset may need to be changed. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
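/* Illustrative sketch (not part of this header): VF_OFFSET and
 * VF_STRIDE locate each VF's PCIe requester ID relative to its parent
 * PF, following the usual SR-IOV arithmetic (vf_index starting at 0).
 */
static inline unsigned int vf_rid(unsigned int pf_rid,
				  unsigned int vf_offset,
				  unsigned int vf_stride,
				  unsigned int vf_index)
{
	return pf_rid + vf_offset + vf_index * vf_stride;
}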
/***********************************/
@@ -7224,19 +8495,24 @@
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
/* Max number of VFs before sriov stride and offset may need to be changed. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF, or 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous, 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
@@ -7258,12 +8534,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -7278,6 +8557,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
@@ -7311,6 +8591,7 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
@@ -7392,6 +8673,7 @@
#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
/***********************************/
@@ -7406,6 +8688,7 @@
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
@@ -7423,6 +8706,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
@@ -7445,6 +8729,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
/***********************************/
@@ -7459,6 +8744,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
@@ -7478,6 +8764,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
@@ -7494,6 +8781,7 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* enum: MISC. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
/* enum: IDO. */
@@ -7506,10 +8794,12 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
@@ -7557,10 +8847,12 @@
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
@@ -7627,6 +8919,7 @@
* in a command from the host.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
@@ -7636,6 +8929,7 @@
* mc_flash_layout.h.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
@@ -7672,12 +8966,14 @@
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
/* enum: Last chunk, containing checksum rather than data */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
/* enum: Abort download of this item */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
/* Length of this chunk in bytes */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
/* Data for this chunk */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
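/* Illustrative sketch (not part of this header) of one plausible chunked
 * download sequence implied by the CHUNK_ID fields above; send_chunk() is
 * a hypothetical stand-in for issuing MC_CMD_SATELLITE_DOWNLOAD, and the
 * 128-byte chunk size is arbitrary.
 */
#include <stddef.h>

extern int send_chunk(unsigned int chunk_id, const void *data, size_t len);

static int satellite_download(const unsigned char *img, size_t len,
			      unsigned int checksum)
{
	unsigned int id = 0;

	while (len) {
		size_t n = len < 128 ? len : 128;

		if (send_chunk(id++, img, n) != 0) {
			send_chunk(0xfffffffe, NULL, 0);  /* CHUNK_ID_ABORT */
			return -1;
		}
		img += n;
		len -= n;
	}
	/* The final chunk carries the checksum rather than data. */
	return send_chunk(0xffffffff, &checksum, sizeof(checksum));
}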
@@ -7688,8 +8984,10 @@
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
/* Extra status information */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
/* enum: Code download OK, completed. */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
/* enum: Code download aborted as requested. */
@@ -7726,6 +9024,7 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
@@ -7793,6 +9092,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -7813,6 +9114,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7822,6 +9125,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -7848,7 +9153,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -7864,6 +9171,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -7888,7 +9197,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -7901,12 +9212,16 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
@@ -7915,6 +9230,7 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
@@ -7982,6 +9298,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -8002,6 +9320,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8011,6 +9331,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -8037,7 +9359,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8053,6 +9377,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8077,7 +9403,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8090,14 +9418,19 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8124,6 +9457,18 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8181,9 +9526,10 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
@@ -8251,6 +9597,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -8271,6 +9619,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8280,6 +9630,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -8306,7 +9658,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8322,6 +9676,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8346,7 +9702,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8359,14 +9717,19 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8393,6 +9756,18 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8463,6 +9838,348 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
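/* Illustrative sketch (not part of this header): interpreting one
 * PFS_TO_PORTS_ASSIGNMENT byte per the special values above. Returns
 * the external port number, or -1 if the PF has no usable assignment.
 */
static inline int pf_external_port(unsigned char assignment)
{
	switch (assignment) {
	case 0xff:		/* ACCESS_NOT_PERMITTED */
	case 0xfe:		/* PF_NOT_PRESENT */
	case 0xfd:		/* PF_NOT_ASSIGNED */
	case 0xfc:		/* INCOMPATIBLE_ASSIGNMENT, treated the same */
		return -1;
	default:
		return assignment;
	}
}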
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
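/* Illustrative sketch (not part of this header): both descriptor cache
 * fields above are binary logarithms, so the actual entry count is a
 * single shift.
 */
static inline unsigned int desc_cache_entries(unsigned char log2_size)
{
	return 1u << log2_size;		/* 2 ^ *_DESC_CACHE_SIZE */
}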
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
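/* Illustrative sketch (not part of this header): per-VI address window
 * stride in bytes implied by each VI_WINDOW_MODE value above.
 */
static inline unsigned int vi_window_stride(unsigned char mode)
{
	switch (mode) {
	case 0x0: return  8 << 10;	/* 8k; CTPIO not mapped */
	case 0x1: return 16 << 10;	/* 16k; CTPIO at 12k */
	case 0x2: return 64 << 10;	/* 64k; CTPIO at 12k */
	default:  return 0;		/* unknown mode */
	}
}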
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
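/* Illustrative sketch (not part of this header): sizing a MAC stats DMA
 * buffer from MAC_STATS_NUM_STATS as described above; every entry,
 * including the final GENERATION_END, is one 64-bit value.
 */
static inline unsigned long mac_stats_dma_len(unsigned int num_stats)
{
	return (unsigned long)num_stats * 8;	/* 8 bytes per stat */
}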
/***********************************/
@@ -8502,6 +10219,7 @@
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
/***********************************/
@@ -8516,6 +10234,7 @@
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
@@ -8533,17 +10252,22 @@
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
/* the desired maximum fill level */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -8561,10 +10285,13 @@
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
/* bitmask of the priority queues this txq is inserted into. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8573,25 +10300,32 @@
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
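/* Illustrative sketch (not part of this patch): the _LBN/_WIDTH pairs above
 * follow the usual MCDI convention of (lowest bit number, field width). A
 * generic extractor for flag fields narrower than 32 bits:
 */
static unsigned int mcdi_dword_field(unsigned int dword, unsigned int lbn,
				     unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1u);
}
/* e.g. mcdi_dword_field(pq_flags, MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN,
 *	MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH) yields the GUARANTEED bit.
 */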
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to the bucket associated with the
 * outer vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to the bucket associated with the
 * inner vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
/* bitmask of the priority queues this txq is inserted into. */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8600,18 +10334,23 @@
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to the bucket associated with the
 * outer vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to the bucket associated with the
 * inner vswitch)
 */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -8629,8 +10368,10 @@
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
@@ -8648,6 +10389,7 @@
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
@@ -8665,8 +10407,10 @@
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of v-switch to create. */
#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
@@ -8679,6 +10423,7 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
@@ -8689,6 +10434,7 @@
* v-ports with this number of tags.
*/
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
@@ -8706,6 +10452,7 @@
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
@@ -8725,6 +10472,7 @@
#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
@@ -8742,8 +10490,10 @@
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of the new v-port. */
#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
/* enum: VEB (obsolete) */
@@ -8764,6 +10514,7 @@
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
/* Flags controlling v-port creation */
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
@@ -8773,8 +10524,10 @@
* v-switch.
*/
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8784,6 +10537,7 @@
#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
/* The handle of the new v-port */
#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
/***********************************/
@@ -8798,6 +10552,7 @@
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_FREE_OUT msgresponse */
#define MC_CMD_VPORT_FREE_OUT_LEN 0
@@ -8815,18 +10570,23 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Flags controlling v-adaptor creation */
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
/* The number of VLAN tags to transparently insert/remove. */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8853,6 +10613,7 @@
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
@@ -8870,6 +10631,7 @@
#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* The new MAC address to assign to this v-adaptor */
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
@@ -8890,6 +10652,7 @@
#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
@@ -8910,15 +10673,19 @@
#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
/* The number of VLAN tags that may still be added */
#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -8933,8 +10700,10 @@
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
/* The target function to modify. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
@@ -8955,9 +10724,13 @@
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
/* Write enable bits 0-3, set to write, clear to read. */
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
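/* Illustrative sketch (not part of this patch): LBN 128 counts bits from the
 * start of the message, so write-enable bit N for region N lives in 32-bit
 * word 128 / 32 == 4 of the request; "req" is a hypothetical dword view of
 * the request buffer.
 */
static void a64_region_write_enable(unsigned int *req, unsigned int region)
{
	req[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN / 32] |= 1u << region;
}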
@@ -8969,9 +10742,13 @@
*/
#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
/***********************************/
@@ -8986,11 +10763,13 @@
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
/* The handle of the new Onload stack */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
/***********************************/
@@ -9005,6 +10784,7 @@
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
@@ -9022,8 +10802,10 @@
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of context to allocate */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
/* enum: Allocate a context for exclusive use. The key and indirection table
* must be explicitly configured.
*/
@@ -9037,6 +10819,7 @@
* in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
@@ -9045,6 +10828,7 @@
* handle.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
/* enum: guaranteed invalid RSS context handle value */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
@@ -9061,6 +10845,7 @@
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
@@ -9078,6 +10863,7 @@
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
@@ -9098,6 +10884,7 @@
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
@@ -9118,6 +10905,7 @@
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
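/* Illustrative sketch (not part of this patch): filling the 128-entry,
 * one-byte-per-entry table above with a round-robin spread over "num_queues"
 * queues (both parameters hypothetical; num_queues must be non-zero).
 */
static void rss_fill_indirection_table(unsigned char *table,
				       unsigned int num_queues)
{
	unsigned int i;

	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
		table[i] = i % num_queues;
}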
@@ -9138,6 +10926,7 @@
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
@@ -9158,6 +10947,7 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* Hash control flags. The _EN bits are always supported, but new modes are
* available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
* in this case, the MODE fields may be set to non-zero values, and will take
@@ -9171,6 +10961,7 @@
* particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9210,6 +11001,7 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
@@ -9227,6 +11019,7 @@
* always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9263,11 +11056,13 @@
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
* offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
* referenced RSS contexts must span no more than this number.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
@@ -9276,6 +11071,7 @@
* handle.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
/* enum: guaranteed invalid .1p mapping handle value */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
@@ -9292,6 +11088,7 @@
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
@@ -9309,6 +11106,7 @@
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
* handle)
*/
@@ -9331,6 +11129,7 @@
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
@@ -9356,10 +11155,13 @@
#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
/* Base absolute interrupt vector number. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
/***********************************/
@@ -9376,10 +11178,13 @@
* let the system find a suitable base.
*/
#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
@@ -9397,6 +11202,7 @@
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to add */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9417,6 +11223,7 @@
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to delete */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9437,6 +11244,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
@@ -9444,6 +11252,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
/* The number of MAC addresses returned */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
/* Array of MAC addresses */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
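/* Illustrative sketch (not part of this patch): the _OUT_LEN(num) macro gives
 * the expected response size for a given address count, so a caller can
 * validate a reply before walking the array ("resp_len" and "count" are
 * hypothetical).
 */
static int macaddrs_resp_ok(unsigned long resp_len, unsigned int count)
{
	return resp_len >= MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(count);
}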
@@ -9465,8 +11274,10 @@
#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
/* The handle of the v-port */
#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
/* Flags requesting what should be changed. */
#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
@@ -9476,14 +11287,17 @@
* v-switch.
*/
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
/* The number of MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
/* MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
@@ -9492,6 +11306,7 @@
/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
@@ -9508,15 +11323,18 @@
#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
/* The number of VLAN tags that may be used on a v-adaptor connected to this
* EVB port.
*/
#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -9528,14 +11346,16 @@
*/
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Number of buffer table entries to dump. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
@@ -9559,6 +11379,7 @@
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
@@ -9588,6 +11409,7 @@
/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
@@ -9611,8 +11433,10 @@
#define MC_CMD_GET_CLOCK_OUT_LEN 8
/* System frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
/* DPCPU frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/***********************************/
@@ -9621,36 +11445,43 @@
*/
#define MC_CMD_SET_CLOCK 0xad
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 28
/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
/* enum: Leave the system clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
/* enum: Leave the inter-core clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
/* enum: Leave the DPCPU clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for PCS clock domain */
#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
/* enum: Leave the PCS clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for MC clock domain */
#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
/* enum: Leave the MC clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for rmon clock domain */
#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
/* enum: Leave the rmon clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for vswitch clock domain */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
/* enum: Leave the vswitch clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
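/* Illustrative sketch (not part of this patch): every *_DOMAIN_DONT_CHANGE
 * enum above is 0x0, so zero-initialising the request leaves all domains
 * untouched and only the fields explicitly set take effect; "req" is a
 * hypothetical dword view of the 28-byte request.
 */
static void set_clock_sys_only(unsigned int *req, unsigned int sys_mhz)
{
	unsigned int i;

	for (i = 0; i < MC_CMD_SET_CLOCK_IN_LEN / 4; i++)
		req[i] = 0;	/* DONT_CHANGE for every domain */
	req[MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST / 4] = sys_mhz;
}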
@@ -9658,30 +11489,37 @@
#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
/* enum: The system clock domain doesn't exist */
#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
/* enum: The inter-core clock domain doesn't exist / isn't used */
#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/* enum: The dpcpu clock domain doesn't exist */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
/* Resulting PCS frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
/* enum: The PCS clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
/* Resulting MC frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
/* enum: The MC clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
/* Resulting rmon frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
/* enum: The rmon clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
/* Resulting vswitch frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
/* enum: The vswitch clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
@@ -9692,11 +11530,12 @@
*/
#define MC_CMD_DPCPU_RPC 0xae
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
/* enum: RxDPCPU0 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
@@ -9761,12 +11600,15 @@
#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
/* Register data to write. Only valid in write/write-read. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
/* Register address. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
/* MC_CMD_DPCPU_RPC_OUT msgresponse */
#define MC_CMD_DPCPU_RPC_OUT_LEN 36
#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
/* DATA */
#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
@@ -9777,9 +11619,13 @@
#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
/***********************************/
@@ -9794,6 +11640,7 @@
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
@@ -9811,6 +11658,7 @@
#define MC_CMD_SHMBOOT_OP_IN_LEN 4
/* Identifies the operation to perform */
#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
/* enum: Copy slave_data section to the slave core. (Greenport only) */
#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
@@ -9824,13 +11672,16 @@
*/
#define MC_CMD_CAP_BLK_READ 0xe7
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
@@ -9850,53 +11701,77 @@
*/
#define MC_CMD_DUMP_DO 0xe8
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
/* enum: The uart port this command was received over (if using a uart
* transport)
*/
#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/* MC_CMD_DUMP_DO_OUT msgresponse */
#define MC_CMD_DUMP_DO_OUT_LEN 4
#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
/***********************************/
@@ -9905,41 +11780,64 @@
*/
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/***********************************/
@@ -9950,17 +11848,20 @@
*/
#define MC_CMD_SET_PSU 0xea
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
/* desired value, e.g. voltage in mV */
#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
/* MC_CMD_SET_PSU_OUT msgresponse */
#define MC_CMD_SET_PSU_OUT_LEN 0
@@ -9980,7 +11881,9 @@
/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
/***********************************/
@@ -10016,12 +11919,16 @@
#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
/* Offset at which to write the data */
#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
@@ -10044,12 +11951,16 @@
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
/* Offset from which to read the data */
#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
/* MC_CMD_UART_RECV_DATA_IN msgresponse */
#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
@@ -10057,12 +11968,16 @@
#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
@@ -10075,14 +11990,16 @@
*/
#define MC_CMD_READ_FUSES 0xf0
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
/* Length of data to read in bytes */
#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
/* MC_CMD_READ_FUSES_OUT msgresponse */
#define MC_CMD_READ_FUSES_OUT_LENMIN 4
@@ -10090,6 +12007,7 @@
#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
/* Length of returned OTP data in bytes */
#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
/* Returned data */
#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
@@ -10197,6 +12115,60 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
/* enum: CTLE EQ Resistor (0-7, Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -10268,7 +12240,7 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
@@ -10290,9 +12262,9 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
/* enum: TX Amplitude Fine control (Medford) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
-/* enum: Pre-shoot Tap (Medford) */
+/* enum: Pre-shoot Tap (Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
-/* enum: De-emphasis Tap (Medford) */
+/* enum: De-emphasis Tap (Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
@@ -10361,7 +12333,24 @@
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+/* Scan duration / cycle count */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10393,10 +12382,12 @@
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
/***********************************/
@@ -10594,6 +12585,7 @@
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10636,6 +12628,7 @@
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
@@ -10647,23 +12640,30 @@
#define MC_CMD_LICENSING_OUT_LEN 28
/* count of application keys which are valid */
#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being blacklisted */
#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being for the wrong node
*/
#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
@@ -10683,6 +12683,7 @@
#define MC_CMD_LICENSING_V3_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
@@ -10696,20 +12697,26 @@
#define MC_CMD_LICENSING_V3_OUT_LEN 88
/* count of keys which are valid */
#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
/* count of keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
/* count of keys which are invalid due to being for the wrong node */
#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
@@ -10750,8 +12757,10 @@
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
/* type of license (e.g. 3) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
/* length of the license ID (in bytes) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
/* the unique license ID of the adapter */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -10789,11 +12798,13 @@
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
@@ -10824,6 +12835,7 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
@@ -10874,8 +12886,10 @@
#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
/* application ID */
#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
/* enum: validate application */
#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
/* enum: mask application */
@@ -10900,8 +12914,10 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
/* application ID */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
/* validation challenge */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -10910,6 +12926,7 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
/* feature expiry (time_t) */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
/* validation response */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
@@ -10918,10 +12935,13 @@
#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
/* application ID */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
/* flag */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -10959,8 +12979,10 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
/* application expiry time */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
/* application expiry units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
/* enum: expiry units are accounting units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
/* enum: expiry units are calendar days */
@@ -10984,7 +13006,7 @@
*/
#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -10995,6 +13017,7 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
/* enum: turn the features off */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
/* enum: turn the features back on */
@@ -11014,12 +13037,13 @@
*/
#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
/* operation code */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
/* enum: install a new license, overwriting any existing temporary license.
* This is an asynchronous operation owing to the time taken to validate an
* ECDSA license
@@ -11037,6 +13061,7 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
/* ECDSA license and signature */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -11044,15 +13069,18 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
/* status code */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
/* enum: finished validating and installing license */
#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
/* enum: license validation and installation in progress */
@@ -11084,14 +13112,17 @@
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -11101,6 +13132,7 @@
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11123,20 +13155,24 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -11153,6 +13189,7 @@
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
/* the type of configuration setting to change */
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
* internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
*/
@@ -11166,6 +13203,7 @@
* on the type of configuration setting being changed
*/
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* new value: the details depend on the type of configuration setting being
* changed
*/
@@ -11190,12 +13228,14 @@
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
/* the type of configuration setting to read */
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
* the type of configuration setting being read
*/
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -11228,12 +13268,15 @@
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -11243,6 +13286,7 @@
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11265,18 +13309,22 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -11291,16 +13339,22 @@
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
/* The rx queue to get stats for. */
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
/***********************************/
@@ -11309,6 +13363,8 @@
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
@@ -11316,20 +13372,27 @@
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
/* The maximum number of PFs the device can expose */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
/* The maximum number of VFs the device can expose in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
/* The maximum number of MSI-X vectors the device can provide in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each PF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each VF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one PF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one VF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
/***********************************/
@@ -11347,10 +13410,13 @@
#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
/* Default (canonical) board mode */
#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
/* Current board mode */
#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
/***********************************/
@@ -11359,21 +13425,26 @@
*/
#define MC_CMD_READ_ATB 0x100
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_ATB_IN msgrequest */
#define MC_CMD_READ_ATB_IN_LEN 16
#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
/* MC_CMD_READ_ATB_OUT msgresponse */
#define MC_CMD_READ_ATB_OUT_LEN 4
#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
/***********************************/
@@ -11390,7 +13461,9 @@
/* Each workaround is represented by a single bit according to the enums below.
*/
#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -11425,6 +13498,7 @@
* 1,3 = 0x00030001
*/
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
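/* Illustrative sketch (not from this header): packing a PF/VF pair into the
 * FUNCTION field per the LBN/WIDTH layout above, matching the documented
 * encoding of PF 1 / VF 3 as 0x00030001:
 *
 *   static inline u32 mcdi_function_id(u16 pf, u16 vf)
 *   {
 *           return ((u32)vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN) | pf;
 *   }
 */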
@@ -11434,6 +13508,7 @@
* set to 1.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
@@ -11460,6 +13535,10 @@
* only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
@@ -11469,6 +13548,7 @@
#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
/* For an admin function, all the privileges are always reported. */
#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
/***********************************/
@@ -11485,12 +13565,14 @@
* e.g. VF 1,3 = 0x00030001
*/
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
/* New link state mode to be set */
#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
@@ -11501,11 +13583,12 @@
/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
/***********************************/
/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
* parameter to MC_CMD_INIT_RXQ.
*/
#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -11519,8 +13602,10 @@
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
/* Minimum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
/* Maximum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
/***********************************/
@@ -11529,7 +13614,7 @@
*/
#define MC_CMD_FUSE_DIAGS 0x102
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_FUSE_DIAGS_IN msgrequest */
#define MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -11538,28 +13623,40 @@
#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
/* Total number of mismatched bits between pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
/***********************************/
@@ -11576,6 +13673,7 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
/* The groups of functions to have their privilege masks modified. */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
@@ -11584,6 +13682,7 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
/* For VFS_OF_PF specify the PF, for ONE specify the target function */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -11592,10 +13691,12 @@
* refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
/* Privileges to be removed from the target functions. For privilege
* definitions refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
@@ -11613,8 +13714,10 @@
#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -11633,7 +13736,7 @@
*/
#define MC_CMD_XPM_WRITE_BYTES 0x104
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -11641,8 +13744,10 @@
#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
/* Start address (byte) */
#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
/* Data */
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -11659,14 +13764,16 @@
*/
#define MC_CMD_XPM_READ_SECTOR 0x105
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
/* Sector index */
#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
/* Sector size */
#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -11674,9 +13781,11 @@
#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
/* Sector type */
#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
/* Sector data */
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
@@ -11691,7 +13800,7 @@
*/
#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -11708,10 +13817,12 @@
#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
/* Sector data */
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -11722,6 +13833,7 @@
#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
/* New sector index */
#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
/***********************************/
@@ -11730,12 +13842,13 @@
*/
#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
/* Sector index */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -11747,14 +13860,16 @@
*/
#define MC_CMD_XPM_BLANK_CHECK 0x108
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -11762,6 +13877,7 @@
#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
/* Total number of bad (non-blank) locations */
#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
* into MCDI response)
*/
@@ -11777,14 +13893,16 @@
*/
#define MC_CMD_XPM_REPAIR 0x109
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_REPAIR_IN msgrequest */
#define MC_CMD_XPM_REPAIR_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
/* MC_CMD_XPM_REPAIR_OUT msgresponse */
#define MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -11797,7 +13915,7 @@
*/
#define MC_CMD_XPM_DECODER_TEST 0x10a
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -11816,7 +13934,7 @@
*/
#define MC_CMD_XPM_WRITE_TEST 0x10b
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -11842,10 +13960,13 @@
#define MC_CMD_EXEC_SIGNED_IN_LEN 28
/* the length of code to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
/* the length of data to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
/* the XPM sector containing the key to use */
#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
/* the expected CMAC value */
#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -11868,11 +13989,34 @@
#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
/* the length of data area to clear */
#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
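+/* Illustrative sketch (not part of this structuredef): one entry word built
+ * from the fields above, here for Geneve on its IANA-allocated port:
+ *
+ *   u32 entry = (TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE <<
+ *                TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN) |
+ *               TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT;
+ *
+ * which evaluates to 0x000117c1 (protocol 0x1 in the high 16 bits, UDP port
+ * 0x17c1 == 6081 in the low 16 bits).
+ */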
+
+
/***********************************/
/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
* Configure UDP ports for tunnel encapsulation hardware acceleration. The
@@ -11913,27 +14057,6 @@
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
-/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
-/* UDP port (the standard ports are named below but any port may be used) */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
-/* enum: the IANA allocated UDP port for VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
-/* enum: the IANA allocated UDP port for Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
-/* tunnel encapsulation protocol (only those named below are supported) */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
-/* enum: VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
-/* enum: Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
-
/***********************************/
/* MC_CMD_RX_BALANCING
@@ -11950,12 +14073,16 @@
#define MC_CMD_RX_BALANCING_IN_LEN 16
/* The RX port whose upconverter table will be modified */
#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
/* The VLAN priority associated with the table index and vFIFO */
#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
/* The resulting bit of SRC^DST for indexing the table */
#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
/* The RX engine to which the vFIFO in the table entry will point */
#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
@@ -11976,8 +14103,10 @@
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
/* The tag to be appended */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
/* The length of the data */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
/* The data to be contained in the TLV structure */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
@@ -12002,6 +14131,7 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
/* Data type to be checked */
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
@@ -12009,10 +14139,13 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
/* Number of sectors found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
/* Number of bytes found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
/* Length of signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
/* Signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
@@ -12037,12 +14170,16 @@
#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
/* Function-relative queue instance */
#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
/* Requested value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
/* Requested value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
@@ -12052,8 +14189,10 @@
#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
/* Actual value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
/* Actual value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
/***********************************/
@@ -12071,29 +14210,35 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
/* Reserved for future use. */
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
* nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
* load/reload count.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
* allowed for timer load/reload counts.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
* multiple of this step size will be rounded in an implementation defined
* manner.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
* meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
/* Timer durations requested via MCDI that are not a multiple of this step size
* will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
/* For timers updated using the bug35388 workaround, this is the time interval
* (in nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
@@ -12101,17 +14246,20 @@
* is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, this is the maximum value
* allowed for timer load/reload counts. This field is only meaningful if the
* bug35388 workaround is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, timer load/reload counts
* not a multiple of this step size will be rounded in an implementation
* defined manner. This field is only meaningful if the bug35388 workaround is
* enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
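/* Illustrative sketch (assumed driver-side arithmetic, not from this header):
 * converting a requested timer duration into an EVQ_TMR_REG load count, where
 * ns_per_count, step and max_count stand for the TMR_REG_NS_PER_COUNT,
 * TMR_REG_STEP and TMR_REG_MAX_COUNT fields above:
 *
 *   unsigned int count = DIV_ROUND_UP(duration_ns, ns_per_count);
 *
 *   count = min(roundup(count, step), max_count);
 */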
/***********************************/
@@ -12129,19 +14277,24 @@
* local queue index.
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
/* Will the common pool be used as TX_vFIFO_ULL (1) */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
/* Number of buffers to reserve for the common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
/* TX datapath to which the Common Pool is connected. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
/* enum: Extracts information from function */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
/* Network port or RX Engine to which the common pool connects. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
/* enum: Extracts information from function */
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
@@ -12157,6 +14310,7 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
/* ID of the common pool allocated */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
/***********************************/
@@ -12173,8 +14327,10 @@
/* Previously allocated common pool with which the new vFIFO will be associated
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
/* Port or RX engine with which to associate the vFIFO egress */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
/* enum: Extracts information from common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
@@ -12187,12 +14343,15 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
/* Minimum number of buffers that the pool must have */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
/* enum: Do not check the space available */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
/* Will the vFIFO be used as TX_vFIFO_ULL */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
/* Network priority of the vFIFO, if applicable */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
/* enum: Search for the lowest unused priority */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
@@ -12200,8 +14359,10 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
/* Short vFIFO ID */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
/* Network priority of the vFIFO */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
/***********************************/
@@ -12217,6 +14378,7 @@
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
/* Short vFIFO ID */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
@@ -12235,6 +14397,7 @@
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
/* Common pool ID given when pool allocated */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
@@ -12256,8 +14419,10 @@
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
/* Available buffers for the ENG to NET vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 6e1f282b2976..ce8aabf9091e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
- u32 result = 0;
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
switch (media) {
case MC_CMD_MEDIA_KX4:
- result |= SUPPORTED_Backplane;
+ SET_BIT(Backplane);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseKX_Full;
+ SET_BIT(1000baseKX_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseKX4_Full;
+ SET_BIT(10000baseKX4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseKR4_Full;
+ SET_BIT(40000baseKR4_Full);
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
- result |= SUPPORTED_FIBRE;
+ SET_BIT(FIBRE);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseCR4_Full;
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
break;
case MC_CMD_MEDIA_BASE_T:
- result |= SUPPORTED_TP;
+ SET_BIT(TP);
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- result |= SUPPORTED_10baseT_Half;
+ SET_BIT(10baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- result |= SUPPORTED_10baseT_Full;
+ SET_BIT(10baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- result |= SUPPORTED_100baseT_Half;
+ SET_BIT(100baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- result |= SUPPORTED_100baseT_Full;
+ SET_BIT(100baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- result |= SUPPORTED_1000baseT_Half;
+ SET_BIT(1000baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- result |= SUPPORTED_Pause;
+ SET_BIT(Pause);
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- result |= SUPPORTED_Asym_Pause;
+ SET_BIT(Asym_Pause);
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- result |= SUPPORTED_Autoneg;
+ SET_BIT(Autoneg);
- return result;
+ #undef SET_BIT
}
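/* Usage sketch (illustrative, not part of this patch): decoding a firmware
 * capability mask into an ethtool link-mode bitmap:
 *
 *   __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
 *
 *   mcdi_to_ethtool_linkset(MC_CMD_MEDIA_SFP_PLUS,
 *                           1 << MC_CMD_PHY_CAP_10000FDX_LBN, modes);
 *
 * after which test_bit() reports ETHTOOL_LINK_MODE_FIBRE_BIT and
 * ETHTOOL_LINK_MODE_10000baseT_Full_BIT as set in modes.
 */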
-static u32 ethtool_to_mcdi_cap(u32 cap)
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
- if (cap & SUPPORTED_10baseT_Half)
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (cap & SUPPORTED_10baseT_Full)
+ if (TEST_BIT(10baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (cap & SUPPORTED_100baseT_Half)
+ if (TEST_BIT(100baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (cap & SUPPORTED_100baseT_Full)
+ if (TEST_BIT(100baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (cap & SUPPORTED_1000baseT_Half)
+ if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (cap & SUPPORTED_Pause)
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (cap & SUPPORTED_Asym_Pause)
+ if (TEST_BIT(Asym_Pause))
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (cap & SUPPORTED_Autoneg)
+ if (TEST_BIT(Autoneg))
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+ #undef TEST_BIT
+
return result;
}
@@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
return flags;
}
-static u32 mcdi_to_ethtool_media(u32 media)
+static u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
@@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
else
phy_data->forced_cap = caps;
@@ -435,8 +454,8 @@ fail:
int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps = (efx->link_advertising ?
- ethtool_to_mcdi_cap(efx->link_advertising) :
+ u32 caps = (efx->link_advertising[0] ?
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
@@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
- u32 supported, advertising, lp_advertising;
- supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- advertising = efx->link_advertising;
cmd->base.speed = efx->link_state.speed;
cmd->base.duplex = efx->link_state.fd;
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
- cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- lp_advertising =
- mcdi_to_ethtool_cap(phy_cfg->media,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- lp_advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
}
static int
@@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
- u32 advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
if (cmd->base.autoneg) {
- caps = (ethtool_to_mcdi_cap(advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
} else if (cmd->base.duplex) {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
- case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
- case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
}
} else {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
}
}
@@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return rc;
if (cmd->base.autoneg) {
- efx_link_set_advertising(
- efx, advertising | ADVERTISED_Autoneg);
+ efx_link_set_advertising(efx, cmd->link_modes.advertising);
phy_cfg->forced_cap = 0;
} else {
- efx_link_set_advertising(efx, 0);
+ efx_link_clear_advertising(efx);
phy_cfg->forced_cap = caps;
}
return 0;
@@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
@@ -1087,7 +1101,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
int period = action == EFX_STATS_ENABLE ? 1000 : 0;
dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
u32 dma_len = action != EFX_STATS_DISABLE ?
- MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+ efx->num_mac_stats * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -1121,7 +1135,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
{
__le64 *dma_stats = efx->stats_buffer.addr;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
@@ -1139,10 +1153,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
__le64 *dma_stats = efx->stats_buffer.addr;
int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
- while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ while (dma_stats[efx->num_mac_stats - 1] ==
EFX_MC_STATS_GENERATION_INVALID &&
attempts-- != 0)
udelay(EFX_MAC_STATS_WAIT_US);
@@ -1167,7 +1181,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c0537ea06c9a..d20a8660ee48 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -191,6 +191,7 @@ struct efx_tx_buffer {
* Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
+ * @timestamping: Is timestamping enabled for this channel?
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
@@ -202,6 +203,10 @@ struct efx_tx_buffer {
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
+ * @completed_desc_ptr: Most recent completed pointer - only used with
+ * timestamping.
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -247,6 +252,7 @@ struct efx_tx_queue {
void __iomem *piobuf;
unsigned int piobuf_offset;
bool initialised;
+ bool timestamping;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -257,6 +263,9 @@ struct efx_tx_queue {
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
+ unsigned int completed_desc_ptr;
+ u32 completed_timestamp_major;
+ u32 completed_timestamp_minor;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -522,8 +531,12 @@ struct efx_msi_context {
* @copy: Copy the channel state prior to reallocation. May be %NULL if
* reallocation is not supported.
* @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @want_txqs: Determine whether this channel should have TX queues
+ * created. If %NULL, TX queues are not created.
* @keep_eventq: Flag for whether event queue should be kept initialised
* while the device is stopped
+ * @want_pio: Flag for whether PIO buffers should be linked to this
+ * channel's TX queues.
*/
struct efx_channel_type {
void (*handle_no_channel)(struct efx_nic *);
@@ -532,7 +545,9 @@ struct efx_channel_type {
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*want_txqs)(struct efx_channel *);
bool keep_eventq;
+ bool want_pio;
};
enum efx_led_mode {
@@ -708,6 +723,7 @@ struct vfdi_status;
* @reset_work: Scheduled reset workitem
* @membase_phys: Memory BAR value as physical address
* @membase: Memory BAR value
+ * @vi_stride: step between per-VI registers / memory regions
* @interrupt_mode: Interrupt mode
* @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
@@ -734,6 +750,7 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
+ * @n_extra_tx_channels: Number of extra channels with TX queues
* @rx_ip_align: RX DMA address offset to have IP header aligned in
* in accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
@@ -773,6 +790,8 @@ struct vfdi_status;
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_op: PHY interface
@@ -812,6 +831,7 @@ struct vfdi_status;
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
* @ptp_data: PTP state data
+ * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
@@ -842,6 +862,8 @@ struct efx_nic {
resource_size_t membase_phys;
void __iomem *membase;
+ unsigned int vi_stride;
+
enum efx_int_mode interrupt_mode;
unsigned int timer_quantum_ns;
unsigned int timer_max_ns;
@@ -875,6 +897,7 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
+ unsigned n_extra_tx_channels;
unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
@@ -918,6 +941,7 @@ struct efx_nic {
netdev_features_t fixed_features;
+ u16 num_mac_stats;
struct efx_buffer stats_buffer;
u64 rx_nodesc_drops_total;
u64 rx_nodesc_drops_while_down;
@@ -930,7 +954,7 @@ struct efx_nic {
unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
- u32 link_advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
struct efx_link_state link_state;
unsigned int n_link_state_changes;
@@ -965,6 +989,7 @@ struct efx_nic {
#endif
struct efx_ptp_data *ptp_data;
+ bool ptp_warned;
char *vpd_sn;
@@ -1154,7 +1179,7 @@ struct efx_udp_tunnel {
*/
struct efx_nic_type {
bool is_vf;
- unsigned int mem_bar;
+ unsigned int (*mem_bar)(struct efx_nic *efx);
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
@@ -1355,8 +1380,8 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return channel->channel - channel->efx->tx_channel_offset <
- channel->efx->n_tx_channels;
+ return channel->type && channel->type->want_txqs &&
+ channel->type->want_txqs(channel);
}
static inline struct efx_tx_queue *
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b51b6371724..6549fc685a48 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -325,6 +325,30 @@ enum {
EF10_STAT_tx_bad,
EF10_STAT_tx_bad_bytes,
EF10_STAT_tx_overflow,
+ EF10_STAT_V1_COUNT,
+ EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+ EF10_STAT_fec_corrected_errors,
+ EF10_STAT_fec_corrected_symbols_lane0,
+ EF10_STAT_fec_corrected_symbols_lane1,
+ EF10_STAT_fec_corrected_symbols_lane2,
+ EF10_STAT_fec_corrected_symbols_lane3,
+ EF10_STAT_ctpio_dmabuf_start,
+ EF10_STAT_ctpio_vi_busy_fallback,
+ EF10_STAT_ctpio_long_write_success,
+ EF10_STAT_ctpio_missing_dbell_fail,
+ EF10_STAT_ctpio_overflow_fail,
+ EF10_STAT_ctpio_underflow_fail,
+ EF10_STAT_ctpio_timeout_fail,
+ EF10_STAT_ctpio_noncontig_wr_fail,
+ EF10_STAT_ctpio_frm_clobber_fail,
+ EF10_STAT_ctpio_invalid_wr_fail,
+ EF10_STAT_ctpio_vi_clobber_fallback,
+ EF10_STAT_ctpio_unqualified_fallback,
+ EF10_STAT_ctpio_runt_fallback,
+ EF10_STAT_ctpio_success,
+ EF10_STAT_ctpio_fallback,
+ EF10_STAT_ctpio_poison,
+ EF10_STAT_ctpio_erase,
EF10_STAT_COUNT
};
@@ -416,6 +440,7 @@ struct efx_ef10_nic_data {
struct efx_udp_tunnel udp_tunnels[16];
bool udp_tunnels_dirty;
struct mutex udp_tunnels_lock;
+ u64 licensed_features;
};
int efx_init_sriov(void);
@@ -424,6 +449,7 @@ void efx_fini_sriov(void);
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
@@ -447,6 +473,8 @@ static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
}
void efx_ptp_start_datapath(struct efx_nic *efx);
void efx_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index caa89bf7603e..f21661532ed3 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -149,18 +149,14 @@ enum ptp_packet_state {
/* Maximum parts-per-billion adjustment that is acceptable */
#define MAX_PPB 1000000
-/* Number of bits required to hold the above */
-#define MAX_PPB_BITS 20
-
-/* Number of extra bits allowed when calculating fractional ns.
- * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
- * be less than 63.
- */
-#define PPB_EXTRA_BITS 2
-
/* Precalculate scale word to avoid long long division at runtime */
-#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
- MAX_PPB_BITS)) / 1000000000LL)
+/* This is equivalent to 2^66 / 10^9. */
+#define PPB_SCALE_WORD ((1LL << (57)) / 1953125LL)
+
+/* How much to shift down after scaling to convert to FP40 */
+#define PPB_SHIFT_FP40 26
+/* ... and FP44. */
+#define PPB_SHIFT_FP44 22
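As a sanity check on the precalculated constant (a derivation, not part of the patch): since 1953125 = 5^9,

\[
\text{PPB\_SCALE\_WORD} \;=\; \Big\lfloor \frac{2^{57}}{1953125} \Big\rfloor \;=\; \Big\lfloor \frac{2^{57}}{5^{9}} \Big\rfloor \;=\; \Big\lfloor \frac{2^{66}}{2^{9} \cdot 5^{9}} \Big\rfloor \;=\; \Big\lfloor \frac{2^{66}}{10^{9}} \Big\rfloor ,
\]

which is exactly the equivalence the comment claims. For an adjustment of \(\delta\) parts-per-billion, \((\delta \cdot \text{PPB\_SCALE\_WORD}) \gg 26 \approx (\delta/10^{9}) \cdot 2^{40}\) (the FP40 format) and \((\delta \cdot \text{PPB\_SCALE\_WORD}) \gg 22 \approx (\delta/10^{9}) \cdot 2^{44}\) (FP44).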
#define PTP_SYNC_ATTEMPTS 4
@@ -218,8 +214,8 @@ struct efx_ptp_timeset {
* @channel: The PTP channel (Siena only)
* @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
* separate events)
- * @rxq: Receive queue (awaiting timestamps)
- * @txq: Transmit queue
+ * @rxq: Receive SKB queue (awaiting timestamps)
+ * @txq: Transmit SKB queue
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
@@ -233,19 +229,36 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
- * @time_format: Time format supported by this NIC
* @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
* @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time.minor_max: Wrap point for NIC minor times
+ * @nic_time.sync_event_diff_min: Minimum acceptable difference between time
+ * in the packet prefix and the last MCDI time sync event, i.e. how much
+ * earlier than the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_diff_max: Maximum acceptable difference between time
+ * in the packet prefix and the last MCDI time sync event, i.e. how much
+ * later than the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_minor_shift: Shift required to make minor time from
+ * field in MCDI time sync event.
* @min_synchronisation_ns: Minimum acceptable corrected sync window
- * @ts_corrections.tx: Required driver correction of transmit timestamps
- * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @capabilities: Capabilities flags from the NIC
+ * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
+ * timestamps
+ * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
+ * timestamps
* @ts_corrections.pps_out: PPS output error (information only)
* @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @ts_corrections.general_tx: Required driver correction of general packet
+ * transmit timestamps
+ * @ts_corrections.general_rx: Required driver correction of general packet
+ * receive timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
+ * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion
+ * frequency adjustment into a fixed point fractional nanosecond format.
* @current_adjfreq: Current ppb adjustment.
* @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
@@ -264,6 +277,7 @@ struct efx_ptp_timeset {
* @oversize_sync_windows: Number of corrected sync windows that are too large
* @rx_no_timestamp: Number of packets received without a timestamp.
* @timeset: Last set of synchronisation statistics.
+ * @xmit_skb: Transmit SKB function.
*/
struct efx_ptp_data {
struct efx_nic *efx;
@@ -284,22 +298,31 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
- unsigned int time_format;
void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
s32 correction);
+ struct {
+ u32 minor_max;
+ u32 sync_event_diff_min;
+ u32 sync_event_diff_max;
+ unsigned int sync_event_minor_shift;
+ } nic_time;
unsigned int min_synchronisation_ns;
+ unsigned int capabilities;
struct {
- s32 tx;
- s32 rx;
+ s32 ptp_tx;
+ s32 ptp_rx;
s32 pps_out;
s32 pps_in;
+ s32 general_tx;
+ s32 general_rx;
} ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
+ unsigned int adjfreq_ppb_shift;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -319,6 +342,7 @@ struct efx_ptp_data {
unsigned int rx_no_timestamp;
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+ void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
};
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
@@ -329,6 +353,24 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
+ (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
+ ));
+}
+
+/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
+ * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit.
+ */
+static bool efx_ptp_want_txqs(struct efx_channel *channel)
+{
+ return efx_ptp_use_mac_tx_timestamps(channel->efx);
+}
+
#define PTP_SW_STAT(ext_name, field_name) \
{ #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
#define PTP_MC_STAT(ext_name, mcdi_name) \
@@ -471,6 +513,89 @@ static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
return efx_ptp_s27_to_ktime(nic_major, nic_minor);
}
+/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */
+static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec64 ts = ns_to_timespec64(ns);
+
+ *nic_major = (u32)ts.tv_sec;
+ *nic_minor = ts.tv_nsec * 4;
+}
+
+static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt;
+
+ nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4);
+ correction = DIV_ROUND_CLOSEST(correction, 4);
+
+ kt = ktime_set(nic_major, nic_minor);
+
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+{
+ return efx->ptp_data ? efx->ptp_data->channel : NULL;
+}
+
+static u32 last_sync_timestamp_major(struct efx_nic *efx)
+{
+ struct efx_channel *channel = efx_ptp_channel(efx);
+ u32 major = 0;
+
+ if (channel)
+ major = channel->sync_timestamp_major;
+ return major;
+}
+
+/* The 8000 series and later can provide the time from the MAC, which is only
+ * 48 bits long and provides meta-information in the top 2 bits.
+ */
+static ktime_t
+efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+ struct efx_ptp_data *ptp,
+ u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = { 0 };
+
+ if (!(nic_major & 0x80000000)) {
+ WARN_ON_ONCE(nic_major >> 16);
+ /* Use the top bits from the latest sync event. */
+ nic_major &= 0xffff;
+ nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
+
+ kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+ correction);
+ }
+ return kt;
+}
+
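A minimal sketch of the widening step above (widen_major is a hypothetical standalone helper, not driver code): when the meta bit 0x80000000 is clear, only the low 16 bits of the MAC-reported major value are valid, and the upper bits are borrowed from the most recent sync event.

#include <stdint.h>

/* Illustrative only: combine the 16 valid bits of a MAC-reported major
 * (seconds) timestamp with the upper bits of the last sync event's
 * major value.
 */
static uint32_t widen_major(uint32_t sync_major, uint32_t mac_major)
{
	return (sync_major & 0xffff0000u) | (mac_major & 0xffffu);
}

/* e.g. sync_major = 0x00012345, mac_major = 0x6789 -> 0x00016789 */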
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ ktime_t kt;
+
+ if (efx_ptp_use_mac_tx_timestamps(efx))
+ kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ else
+ kt = ptp->nic_to_kernel_time(
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ return kt;
+}
+
/* Get PTP attributes and set up time conversions */
static int efx_ptp_get_attributes(struct efx_nic *efx)
{
@@ -502,31 +627,71 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
return rc;
}
- if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ switch (fmt) {
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
- } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->nic_time.minor_max = 1 << 27;
+ ptp->nic_time.sync_event_minor_shift = 19;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
- } else {
+ ptp->nic_time.minor_max = 1000000000;
+ ptp->nic_time.sync_event_minor_shift = 22;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+ ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+ ptp->nic_time.minor_max = 4000000000UL;
+ ptp->nic_time.sync_event_minor_shift = 24;
+ break;
+ default:
return -ERANGE;
}
- ptp->time_format = fmt;
-
- /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
- * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
- * to use for the minimum acceptable corrected synchronization window.
+ /* Precalculate acceptable difference between the minor time in the
+ * packet prefix and the last MCDI time sync event. We expect the
+ * packet prefix timestamp to be after the sync event by up to one
+ * sync event interval (0.25s), but we allow it to exceed this by a
+ * fuzz factor of 0.1s.
+ */
+ ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+ - (ptp->nic_time.minor_max / 10);
+ ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+ + (ptp->nic_time.minor_max / 10);
+
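For concreteness, with the 27-bit fractional-seconds format (illustrative arithmetic only, using the same integer division as the code):

\[
\begin{aligned}
\text{minor\_max} &= 2^{27} = 134217728,\\
\text{sync\_event\_diff\_max} &= 2^{27}/4 + \lfloor 2^{27}/10 \rfloor = 33554432 + 13421772 = 46976204,\\
\text{sync\_event\_diff\_min} &= 2^{27} - \lfloor 2^{27}/10 \rfloor = 120795956.
\end{aligned}
\]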
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function may now return
+ * a value to use for the minimum acceptable corrected synchronization
+ * window and may return further capabilities.
* If we have the extra information store it. For older firmware that
* does not implement the extended command use the default value.
*/
- if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
ptp->min_synchronisation_ns =
MCDI_DWORD(outbuf,
PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
else
ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->capabilities = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_CAPABILITIES);
+ else
+ ptp->capabilities = 0;
+
+ /* Set up the shift for conversion between frequency
+ * adjustments in parts-per-billion and the fixed-point
+ * fractional ns format that the adapter uses.
+ */
+ if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44;
+ else
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40;
+
return 0;
}
@@ -534,8 +699,9 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN);
int rc;
+ size_t out_len;
/* Get the timestamp corrections from the NIC. If this operation is
* not supported (older NICs) then no correction is required.
@@ -545,21 +711,37 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ outbuf, sizeof(outbuf), &out_len);
if (rc == 0) {
- efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
- efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+
+ if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) {
+ efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX);
+ efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX);
+ } else {
+ efx->ptp_data->ts_corrections.general_tx =
+ efx->ptp_data->ts_corrections.ptp_tx;
+ efx->ptp_data->ts_corrections.general_rx =
+ efx->ptp_data->ts_corrections.ptp_rx;
+ }
} else if (rc == -EINVAL) {
- efx->ptp_data->ts_corrections.tx = 0;
- efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.ptp_tx = 0;
+ efx->ptp_data->ts_corrections.ptp_rx = 0;
efx->ptp_data->ts_corrections.pps_out = 0;
efx->ptp_data->ts_corrections.pps_in = 0;
+ efx->ptp_data->ts_corrections.general_tx = 0;
+ efx->ptp_data->ts_corrections.general_rx = 0;
} else {
efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
sizeof(outbuf), rc);
@@ -873,8 +1055,24 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
return rc;
}
+/* Transmit a PTP packet via the dedicated hardware timestamped queue. */
+static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
+ struct efx_tx_queue *tx_queue;
+ u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+
+ tx_queue = &ptp_data->channel->tx_queue[type];
+ if (tx_queue && tx_queue->timestamping) {
+ efx_enqueue_skb(tx_queue, skb);
+ } else {
+ WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+ dev_kfree_skb_any(skb);
+ }
+}
+
/* Transmit a PTP packet, via the MCDI interface, to the wire. */
-static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
{
struct efx_ptp_data *ptp_data = efx->ptp_data;
struct skb_shared_hwtstamps timestamps;
@@ -910,16 +1108,16 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
- ptp_data->ts_corrections.tx);
+ ptp_data->ts_corrections.ptp_tx);
skb_tstamp_tx(skb, &timestamps);
rc = 0;
fail:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
- return rc;
+ return;
}
static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
@@ -1189,7 +1387,7 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_process_events(efx, &tempq);
while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
+ ptp_data->xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
@@ -1239,6 +1437,14 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
goto fail2;
}
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ ptp->xmit_skb = efx_ptp_xmit_skb_queue;
+ /* Request sync events on this channel. */
+ channel->sync_events_state = SYNC_EVENTS_QUIESCENT;
+ } else {
+ ptp->xmit_skb = efx_ptp_xmit_skb_mc;
+ }
+
INIT_WORK(&ptp->work, efx_ptp_worker);
ptp->config.flags = 0;
ptp->config.tx_type = HWTSTAMP_TX_OFF;
@@ -1303,11 +1509,21 @@ fail1:
static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ int rc;
channel->irq_moderation_us = 0;
channel->rx_queue.core_index = 0;
- return efx_ptp_probe(efx, channel);
+ rc = efx_ptp_probe(efx, channel);
+ /* Failure to probe PTP is not fatal; this channel will just not be
+ * used for anything.
+ * In the case of EPERM, efx_ptp_probe will print its own message (in
+ * efx_ptp_get_attributes()), so we don't need to.
+ */
+ if (rc && rc != -EPERM)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to probe PTP, rc=%d\n", rc);
+ return 0;
}
void efx_ptp_remove(struct efx_nic *efx)
@@ -1332,6 +1548,7 @@ void efx_ptp_remove(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
+ efx->ptp_data = NULL;
}
static void efx_ptp_remove_channel(struct efx_channel *channel)
@@ -1548,6 +1765,17 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
+ /* Check licensed features. If we don't have the license for TX
+ * timestamps, the NIC will not support them.
+ */
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!(nic_data->licensed_features &
+ (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN)))
+ ts_info->so_timestamping &=
+ ~SOF_TIMESTAMPING_TX_HARDWARE;
+ }
if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
ts_info->phc_index =
ptp_clock_index(primary->ptp_data->phc_clock);
@@ -1627,7 +1855,7 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
- ptp->ts_corrections.rx);
+ ptp->ts_corrections.ptp_rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
@@ -1662,9 +1890,11 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
if (!ptp) {
- if (net_ratelimit())
+ if (!efx->ptp_warned) {
netif_warn(efx, drv, efx->net_dev,
"Received PTP event but PTP not set up\n");
+ efx->ptp_warned = true;
+ }
return;
}
@@ -1707,9 +1937,20 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ /* When extracting the sync timestamp minor value, we should discard
+ * the least significant two bits. These are not required in order
+ * to reconstruct full-range timestamps and they are optionally used
+ * to report status depending on the options supplied when subscribing
+ * for sync events.
+ */
channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
channel->sync_timestamp_minor =
- MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC)
+ << ptp->nic_time.sync_event_minor_shift;
+
/* if sync events have been disabled then we want to silently ignore
* this event, so throw away result.
*/
@@ -1717,15 +1958,6 @@ void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
SYNC_EVENTS_VALID);
}
-/* make some assumptions about the time representation rather than abstract it,
- * since we currently only support one type of inline timestamping and only on
- * EF10.
- */
-#define MINOR_TICKS_PER_SECOND 0x8000000
-/* Fuzz factor for sync events to be out of order with RX events */
-#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
-#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
-
static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
@@ -1743,31 +1975,33 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
struct sk_buff *skb)
{
struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
u32 pkt_timestamp_major, pkt_timestamp_minor;
u32 diff, carry;
struct skb_shared_hwtstamps *timestamps;
- pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
- skb_mac_header(skb)) +
- (u32) efx->ptp_data->ts_corrections.rx) &
- (MINOR_TICKS_PER_SECOND - 1);
+ if (channel->sync_events_state != SYNC_EVENTS_VALID)
+ return;
+
+ pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb));
/* get the difference between the packet and sync timestamps,
* modulo one second
*/
- diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
- (MINOR_TICKS_PER_SECOND - 1);
+ diff = pkt_timestamp_minor - channel->sync_timestamp_minor;
+ if (pkt_timestamp_minor < channel->sync_timestamp_minor)
+ diff += ptp->nic_time.minor_max;
+
/* do we roll over a second boundary and need to carry the one? */
- carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ?
1 : 0;
- if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
- FUZZ) {
+ if (diff <= ptp->nic_time.sync_event_diff_max) {
/* packet is ahead of the sync event by a quarter of a second or
* less (allowing for fuzz)
*/
pkt_timestamp_major = channel->sync_timestamp_major + carry;
- } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ } else if (diff >= ptp->nic_time.sync_event_diff_min) {
/* packet is behind the sync event but within the fuzz factor.
* This means the RX packet and sync event crossed as they were
* placed on the event queue, which can sometimes happen.
@@ -1789,7 +2023,9 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
/* attach the timestamps to the skb */
timestamps = skb_hwtstamps(skb);
timestamps->hwtstamp =
- efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+ ptp->nic_to_kernel_time(pkt_timestamp_major,
+ pkt_timestamp_minor,
+ ptp->ts_corrections.general_rx);
}
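A compact sketch of the reconstruction above for the common case where the packet timestamp is ahead of the sync event (pkt_major is a hypothetical standalone helper; the driver additionally handles the crossed-event and out-of-range branches):

#include <stdint.h>

/* Illustrative only: rebuild the seconds (major) part of an RX timestamp
 * from its sub-second (minor) part and the last sync event.
 */
static uint32_t pkt_major(uint32_t sync_major, uint32_t sync_minor,
			  uint32_t pkt_minor, uint32_t minor_max)
{
	uint32_t diff = pkt_minor - sync_minor;	/* difference modulo 1s */

	if (pkt_minor < sync_minor)
		diff += minor_max;
	/* carry one second if sync_minor + diff crossed minor_max */
	return sync_major + (sync_minor >= minor_max - diff ? 1 : 0);
}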
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
@@ -1807,9 +2043,10 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
else if (delta < -MAX_PPB)
delta = -MAX_PPB;
- /* Convert ppb to fixed point ns. */
- adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
- (PPB_EXTRA_BITS + MAX_PPB_BITS));
+ /* Convert ppb to fixed point ns taking care to round correctly. */
+ adjustment_ns = ((s64)delta * PPB_SCALE_WORD +
+ (1 << (ptp_data->adjfreq_ppb_shift - 1))) >>
+ ptp_data->adjfreq_ppb_shift;
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
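Written out, with \(S = \text{PPB\_SCALE\_WORD}\) and \(k\) the adjfreq_ppb_shift chosen above, the conversion rounds to the nearest fixed-point value rather than truncating:

\[
\text{adjustment\_ns} = \big(\delta \cdot S + 2^{\,k-1}\big) \gg k \;\approx\; \operatorname{round}\!\big(\delta \cdot S / 2^{k}\big),
\]

since adding \(2^{k-1}\) before the arithmetic right shift offsets the shift's truncation toward \(-\infty\).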
@@ -1916,6 +2153,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.get_name = efx_ptp_get_channel_name,
/* no copy operation; there is no need to reallocate this channel */
.receive_skb = efx_ptp_rx,
+ .want_txqs = efx_ptp_want_txqs,
.keep_eventq = false,
};
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a617f657eae3..ae8645ae4492 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -242,6 +242,14 @@ static int siena_dimension_resources(struct efx_nic *efx)
return 0;
}
+/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3)
+ * for memory.
+ */
+static unsigned int siena_mem_bar(struct efx_nic *efx)
+{
+ return 2;
+}
+
static unsigned int siena_mem_map_size(struct efx_nic *efx)
{
return FR_CZ_MC_TREG_SMEM +
@@ -547,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
dma_stats = efx->stats_buffer.addr;
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
@@ -950,7 +958,7 @@ fail:
const struct efx_nic_type siena_a0_nic_type = {
.is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+ .mem_bar = siena_mem_bar,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9937a2450e57..cece961f2e82 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -77,9 +77,23 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
(*pkts_compl)++;
- (*bytes_compl) += buffer->skb->len;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
+ hwtstamp.hwtstamp =
+ efx_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
dev_consume_skb_any((struct sk_buff *)buffer->skb);
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
@@ -828,6 +842,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
tx_queue->xmit_more_available = false;
+ tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_ptp_channel(efx));
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
/* Set up default function pointers. These may get replaced by
* efx_nic_init_tx() based off NIC/queue capabilities.
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
new file mode 100644
index 000000000000..6bcfe27fc560
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -0,0 +1,34 @@
+config NET_VENDOR_SOCIONEXT
+ bool "Socionext ethernet drivers"
+ default y
+ ---help---
+ Option to select ethernet drivers for Socionext platforms.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Socionext devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_SOCIONEXT
+
+config SNI_AVE
+ tristate "Socionext AVE ethernet support"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ select PHYLIB
+ ---help---
+ Driver for gigabit ethernet MACs, called AVE, in the
+ Socionext UniPhier family.
+
+config SNI_NETSEC
+ tristate "Socionext NETSEC ethernet support"
+ depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+ select PHYLIB
+ select MII
+ ---help---
+	  Enable this to add support for the SocioNext NetSec Gigabit Ethernet
+	  controller + PHY, as found on the Synquacer SC2A11 SoC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called netsec. If unsure, say N.
+
+endif # NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
new file mode 100644
index 000000000000..7fd837a999fd
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for all ethernet ip drivers on Socionext platforms
+#
+obj-$(CONFIG_SNI_AVE) += sni_ave.o
+obj-$(CONFIG_SNI_NETSEC) += netsec.o
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
new file mode 100644
index 000000000000..f4c0b02ddad8
--- /dev/null
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+
+#define NETSEC_REG_SOFT_RST 0x104
+#define NETSEC_REG_COM_INIT 0x120
+
+#define NETSEC_REG_TOP_STATUS 0x200
+#define NETSEC_IRQ_RX BIT(1)
+#define NETSEC_IRQ_TX BIT(0)
+
+#define NETSEC_REG_TOP_INTEN 0x204
+#define NETSEC_REG_INTEN_SET 0x234
+#define NETSEC_REG_INTEN_CLR 0x238
+
+#define NETSEC_REG_NRM_TX_STATUS 0x400
+#define NETSEC_REG_NRM_TX_INTEN 0x404
+#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
+#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
+#define NRM_TX_ST_NTOWNR BIT(17)
+#define NRM_TX_ST_TR_ERR BIT(16)
+#define NRM_TX_ST_TXDONE BIT(15)
+#define NRM_TX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_NRM_RX_STATUS 0x440
+#define NETSEC_REG_NRM_RX_INTEN 0x444
+#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
+#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
+#define NRM_RX_ST_RC_ERR BIT(16)
+#define NRM_RX_ST_PKTCNT BIT(15)
+#define NRM_RX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_PKT_CMD_BUF 0xd0
+
+#define NETSEC_REG_CLK_EN 0x100
+
+#define NETSEC_REG_PKT_CTRL 0x140
+
+#define NETSEC_REG_DMA_TMR_CTRL 0x20c
+#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
+#define NETSEC_REG_F_TAIKI_VER 0x230
+#define NETSEC_REG_DMA_HM_CTRL 0x214
+#define NETSEC_REG_DMA_MH_CTRL 0x220
+#define NETSEC_REG_ADDR_DIS_CORE 0x218
+#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
+#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
+
+#define NETSEC_REG_NRM_TX_PKTCNT 0x410
+
+#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
+#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
+
+#define NETSEC_REG_NRM_TX_TMR 0x41c
+
+#define NETSEC_REG_NRM_RX_PKTCNT 0x454
+#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
+#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
+#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
+
+#define NETSEC_REG_NRM_RX_TMR 0x45c
+
+#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
+#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
+#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
+#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
+
+#define NETSEC_REG_NRM_TX_CONFIG 0x430
+#define NETSEC_REG_NRM_RX_CONFIG 0x470
+
+#define MAC_REG_STATUS 0x1024
+#define MAC_REG_DATA 0x11c0
+#define MAC_REG_CMD 0x11c4
+#define MAC_REG_FLOW_TH 0x11cc
+#define MAC_REG_INTF_SEL 0x11d4
+#define MAC_REG_DESC_INIT 0x11fc
+#define MAC_REG_DESC_SOFT_RST 0x1204
+#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
+
+#define GMAC_REG_MCR 0x0000
+#define GMAC_REG_MFFR 0x0004
+#define GMAC_REG_GAR 0x0010
+#define GMAC_REG_GDR 0x0014
+#define GMAC_REG_FCR 0x0018
+#define GMAC_REG_BMR 0x1000
+#define GMAC_REG_RDLAR 0x100c
+#define GMAC_REG_TDLAR 0x1010
+#define GMAC_REG_OMR 0x1018
+
+#define MHZ(n) ((n) * 1000 * 1000)
+
+#define NETSEC_TX_SHIFT_OWN_FIELD 31
+#define NETSEC_TX_SHIFT_LD_FIELD 30
+#define NETSEC_TX_SHIFT_DRID_FIELD 24
+#define NETSEC_TX_SHIFT_PT_FIELD 21
+#define NETSEC_TX_SHIFT_TDRID_FIELD 16
+#define NETSEC_TX_SHIFT_CC_FIELD 15
+#define NETSEC_TX_SHIFT_FS_FIELD 9
+#define NETSEC_TX_LAST 8
+#define NETSEC_TX_SHIFT_CO 7
+#define NETSEC_TX_SHIFT_SO 6
+#define NETSEC_TX_SHIFT_TRS_FIELD 4
+
+#define NETSEC_RX_PKT_OWN_FIELD 31
+#define NETSEC_RX_PKT_LD_FIELD 30
+#define NETSEC_RX_PKT_SDRID_FIELD 24
+#define NETSEC_RX_PKT_FR_FIELD 23
+#define NETSEC_RX_PKT_ER_FIELD 21
+#define NETSEC_RX_PKT_ERR_FIELD 16
+#define NETSEC_RX_PKT_TDRID_FIELD 12
+#define NETSEC_RX_PKT_FS_FIELD 9
+#define NETSEC_RX_PKT_LS_FIELD 8
+#define NETSEC_RX_PKT_CO_FIELD 6
+
+#define NETSEC_RX_PKT_ERR_MASK 3
+
+#define NETSEC_MAX_TX_PKT_LEN 1518
+#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
+
+#define NETSEC_RING_GMAC 15
+#define NETSEC_RING_MAX 2
+
+#define NETSEC_TCP_SEG_LEN_MAX 1460
+#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
+
+#define NETSEC_RX_CKSUM_NOTAVAIL 0
+#define NETSEC_RX_CKSUM_OK 1
+#define NETSEC_RX_CKSUM_NG 2
+
+#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
+#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
+
+#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
+#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
+
+#define NETSEC_INT_PKTCNT_MAX 2047
+
+#define NETSEC_FLOW_START_TH_MAX 95
+#define NETSEC_FLOW_STOP_TH_MAX 95
+#define NETSEC_FLOW_PAUSE_TIME_MIN 5
+
+#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
+
+#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
+#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
+#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
+#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
+
+#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
+#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
+#define NETSEC_CLK_EN_REG_DOM_D BIT(0)
+
+#define NETSEC_COM_INIT_REG_DB BIT(2)
+#define NETSEC_COM_INIT_REG_CLS BIT(1)
+#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
+ NETSEC_COM_INIT_REG_DB)
+
+#define NETSEC_SOFT_RST_REG_RESET 0
+#define NETSEC_SOFT_RST_REG_RUN BIT(31)
+
+#define NETSEC_DMA_CTRL_REG_STOP 1
+#define MH_CTRL__MODE_TRANS BIT(20)
+
+#define NETSEC_GMAC_CMD_ST_READ 0
+#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
+#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
+
+#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
+#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
+#define NETSEC_GMAC_BMR_REG_SWR 0x00000001
+
+#define NETSEC_GMAC_OMR_REG_ST BIT(13)
+#define NETSEC_GMAC_OMR_REG_SR BIT(1)
+
+#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
+#define NETSEC_GMAC_MCR_REG_CST BIT(25)
+#define NETSEC_GMAC_MCR_REG_JE BIT(20)
+#define NETSEC_MCR_PS BIT(15)
+#define NETSEC_GMAC_MCR_REG_FES BIT(14)
+#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
+#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
+
+#define NETSEC_FCR_RFE BIT(2)
+#define NETSEC_FCR_TFE BIT(1)
+
+#define NETSEC_GMAC_GAR_REG_GW BIT(1)
+#define NETSEC_GMAC_GAR_REG_GB BIT(0)
+
+#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
+#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
+#define GMAC_REG_SHIFT_CR_GAR 2
+
+#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
+#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
+#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
+#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
+#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
+#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
+
+#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
+#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
+
+#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
+
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
+#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
+#define NETSEC_REG_DESC_TMR_MODE 4
+#define NETSEC_REG_DESC_ENDIAN 0
+
+#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
+#define NETSEC_MAC_DESC_INIT_REG_INIT 1
+
+#define NETSEC_EEPROM_MAC_ADDRESS 0x00
+#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
+#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
+#define NETSEC_EEPROM_HM_ME_SIZE 0x10
+#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
+#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
+#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
+#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
+#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
+
+#define DESC_NUM 128
+#define NAPI_BUDGET (DESC_NUM / 2)
+
+#define DESC_SZ sizeof(struct netsec_de)
+
+#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+
+enum ring_id {
+ NETSEC_RING_TX = 0,
+ NETSEC_RING_RX
+};
+
+struct netsec_desc {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *addr;
+ u16 len;
+};
+
+struct netsec_desc_ring {
+ dma_addr_t desc_dma;
+ struct netsec_desc *desc;
+ void *vaddr;
+ u16 pkt_cnt;
+ u16 head, tail;
+};
+
+struct netsec_priv {
+ struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
+ struct ethtool_coalesce et_coalesce;
+ spinlock_t reglock; /* protect reg access */
+ struct napi_struct napi;
+ phy_interface_t phy_interface;
+ struct net_device *ndev;
+ struct device_node *phy_np;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ void __iomem *ioaddr;
+ void __iomem *eeprom_base;
+ struct device *dev;
+ struct clk *clk;
+ u32 msg_enable;
+ u32 freq;
+ bool rx_cksum_offload_flag;
+};
+
+struct netsec_de { /* Netsec Descriptor layout */
+ u32 attr;
+ u32 data_buf_addr_up;
+ u32 data_buf_addr_lw;
+ u32 buf_len_info;
+};
+
+struct netsec_tx_pkt_ctrl {
+ u16 tcp_seg_len;
+ bool tcp_seg_offload_flag;
+ bool cksum_offload_flag;
+};
+
+struct netsec_rx_pkt_info {
+ int rx_cksum_result;
+ int err_code;
+ bool err_flag;
+};
+
+static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
+{
+ writel(val, priv->ioaddr + reg_addr);
+}
+
+static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
+{
+ return readl(priv->ioaddr + reg_addr);
+}
+
+/************* MDIO BUS OPS FOLLOW *************/
+
+#define TIMEOUT_SPINS_MAC 1000
+#define TIMEOUT_SECONDARY_MS_MAC 100
+
+static u32 netsec_clk_type(u32 freq)
+{
+ if (freq < MHZ(35))
+ return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
+ if (freq < MHZ(60))
+ return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
+ if (freq < MHZ(100))
+ return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
+ if (freq < MHZ(150))
+ return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
+ if (freq < MHZ(250))
+ return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
+
+ return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
+}
+
+static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+
+ while (--timeout && netsec_read(priv, addr) & mask)
+ cpu_relax();
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ while (--timeout && netsec_read(priv, addr) & mask)
+ usleep_range(1000, 2000);
+
+ if (timeout)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
+{
+ netsec_write(priv, MAC_REG_DATA, value);
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
+ return netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+}
+
+static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
+{
+ int ret;
+
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
+ ret = netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+ if (ret)
+ return ret;
+
+ *read = netsec_read(priv, MAC_REG_DATA);
+
+ return 0;
+}
+
+static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
+ u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+ int ret, data;
+
+ do {
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ do {
+ usleep_range(1000, 2000);
+
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout && !ret)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+
+ value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
+
+ if (phydev->speed != SPEED_1000)
+ value |= NETSEC_MCR_PS;
+
+ if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
+ phydev->speed == SPEED_100)
+ value |= NETSEC_GMAC_MCR_REG_FES;
+
+ value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ value |= NETSEC_GMAC_MCR_REG_IBN;
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_phy_write(struct mii_bus *bus,
+ int phy_addr, int reg, u16 val)
+{
+ struct netsec_priv *priv = bus->priv;
+
+ if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_GAR,
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+}
+
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+{
+ struct netsec_priv *priv = bus->priv;
+ u32 data;
+ int ret;
+
+ if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
+ if (ret)
+ return ret;
+
+ return data;
+}
+
+/************* ETHTOOL_OPS FOLLOW *************/
+
+static void netsec_et_get_drvinfo(struct net_device *net_device,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "netsec", sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int netsec_et_get_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ *et_coalesce = priv->et_coalesce;
+
+ return 0;
+}
+
+static int netsec_et_set_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ priv->et_coalesce = *et_coalesce;
+
+ if (priv->et_coalesce.tx_coalesce_usecs < 50)
+ priv->et_coalesce.tx_coalesce_usecs = 50;
+ if (priv->et_coalesce.tx_max_coalesced_frames < 1)
+ priv->et_coalesce.tx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
+ priv->et_coalesce.tx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
+ priv->et_coalesce.tx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
+
+ if (priv->et_coalesce.rx_coalesce_usecs < 50)
+ priv->et_coalesce.rx_coalesce_usecs = 50;
+ if (priv->et_coalesce.rx_max_coalesced_frames < 1)
+ priv->et_coalesce.rx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
+ priv->et_coalesce.rx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
+ priv->et_coalesce.rx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
+
+ return 0;
+}
+
+static u32 netsec_et_get_msglevel(struct net_device *dev)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = datum;
+}
+
+static const struct ethtool_ops netsec_ethtool_ops = {
+ .get_drvinfo = netsec_et_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = netsec_et_get_coalesce,
+ .set_coalesce = netsec_et_set_coalesce,
+ .get_msglevel = netsec_et_get_msglevel,
+ .set_msglevel = netsec_et_set_msglevel,
+};
+
+/************* NETDEV_OPS FOLLOW *************/
+
+static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
+ struct netsec_desc *desc)
+{
+ struct sk_buff *skb;
+
+ if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
+ skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
+ } else {
+ desc->len = L1_CACHE_ALIGN(desc->len);
+ skb = netdev_alloc_skb(priv->ndev, desc->len);
+ }
+ if (!skb)
+ return NULL;
+
+ desc->addr = skb->data;
+ desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, desc->dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+static void netsec_set_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring, u16 idx,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
+ u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
+ (1 << NETSEC_RX_PKT_FS_FIELD) |
+ (1 << NETSEC_RX_PKT_LS_FIELD);
+
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx].dma_addr = desc->dma_addr;
+ dring->desc[idx].addr = desc->addr;
+ dring->desc[idx].len = desc->len;
+ dring->desc[idx].skb = skb;
+}
+
+static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ u16 idx,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc, u16 *len)
+{
+ struct netsec_de de = {};
+
+ memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
+
+ *len = de.buf_len_info >> 16;
+
+ rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+ rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ *desc = dring->desc[idx];
+ return desc->skb;
+}
+
+static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc,
+ u16 *len)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct sk_buff *tmp_skb, *skb = NULL;
+ struct netsec_desc td;
+ int tail;
+
+ *rxpi = (struct netsec_rx_pkt_info){};
+
+ td.len = priv->ndev->mtu + 22;
+
+ tmp_skb = netsec_alloc_skb(priv, &td);
+
+ dma_rmb();
+
+ tail = dring->tail;
+
+ if (!tmp_skb) {
+ netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
+ dring->desc[tail].skb);
+ } else {
+ skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
+ netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
+ }
+
+ /* move tail ahead */
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+
+ dring->pkt_cnt--;
+
+ return skb;
+}
+
+static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ unsigned int pkts, bytes;
+
+ dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+ if (dring->pkt_cnt < budget)
+ budget = dring->pkt_cnt;
+
+ pkts = 0;
+ bytes = 0;
+
+ while (pkts < budget) {
+ struct netsec_desc *desc;
+ struct netsec_de *entry;
+ int tail, eop;
+
+ tail = dring->tail;
+
+ /* move tail ahead */
+ dring->tail = (tail + 1) % DESC_NUM;
+
+ desc = &dring->desc[tail];
+ entry = dring->vaddr + DESC_SZ * tail;
+
+ eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
+ if (eop) {
+ pkts++;
+ bytes += desc->skb->len;
+ dev_kfree_skb(desc->skb);
+ }
+ *desc = (struct netsec_desc){};
+ }
+ dring->pkt_cnt -= budget;
+
+ priv->ndev->stats.tx_packets += budget;
+ priv->ndev->stats.tx_bytes += bytes;
+
+ netdev_completed_queue(priv->ndev, budget, bytes);
+
+ return budget;
+}
+
+static int netsec_process_tx(struct netsec_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ int new, done = 0;
+
+ do {
+ new = netsec_clean_tx_dring(priv, budget);
+ done += new;
+ budget -= new;
+ } while (new);
+
+ if (done && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return done;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct net_device *ndev = priv->ndev;
+ struct netsec_rx_pkt_info rx_info;
+ int done = 0, rx_num = 0;
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ u16 len;
+
+ while (done < budget) {
+ if (!rx_num) {
+ rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+ dring->pkt_cnt += rx_num;
+
+ /* move head 'rx_num' */
+ dring->head = (dring->head + rx_num) % DESC_NUM;
+
+ rx_num = dring->pkt_cnt;
+ if (!rx_num)
+ break;
+ }
+ done++;
+ rx_num--;
+ skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
+ if (unlikely(!skb) || rx_info.err_flag) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: rx fail err(%d)\n",
+ __func__, rx_info.err_code);
+ ndev->stats.rx_dropped++;
+ continue;
+ }
+
+ dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+
+ if (priv->rx_cksum_offload_flag &&
+ rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+ }
+
+ return done;
+}
+
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct netsec_priv *priv;
+ struct net_device *ndev;
+ int tx, rx, done, todo;
+
+ priv = container_of(napi, struct netsec_priv, napi);
+ ndev = priv->ndev;
+
+ todo = budget;
+ do {
+ if (!todo)
+ break;
+
+ tx = netsec_process_tx(priv, todo);
+ todo -= tx;
+
+ if (!todo)
+ break;
+
+ rx = netsec_process_rx(priv, todo);
+ todo -= rx;
+ } while (rx || tx);
+
+ done = budget - todo;
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_SET,
+ NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ return done;
+}
+
+static void netsec_set_tx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ const struct netsec_tx_pkt_ctrl *tx_ctrl,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ int idx = dring->head;
+ struct netsec_de *de;
+ u32 attr;
+
+ de = dring->vaddr + (DESC_SZ * idx);
+
+ attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+ (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+ (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+ (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+ (1 << NETSEC_TX_LAST) |
+ (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+ (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+ (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx] = *desc;
+ dring->desc[idx].skb = skb;
+
+ /* move head ahead */
+ dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ struct netsec_tx_pkt_ctrl tx_ctrl = {};
+ struct netsec_desc tx_desc;
+ u16 tso_seg_len = 0;
+ int filled;
+
+	/* differentiate between a full and an empty ring */
+ if (dring->head >= dring->tail)
+ filled = dring->head - dring->tail;
+ else
+ filled = dring->head + DESC_NUM - dring->tail;
+
+ if (DESC_NUM - filled < 2) { /* if less than 2 available */
+ netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_ctrl.cksum_offload_flag = true;
+
+ if (skb_is_gso(skb))
+ tso_seg_len = skb_shinfo(skb)->gso_size;
+
+ if (tso_seg_len > 0) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = 0;
+ tcp_hdr(skb)->check =
+ ~tcp_v4_check(0, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0);
+ } else {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ tx_ctrl.tcp_seg_offload_flag = true;
+ tx_ctrl.tcp_seg_len = tso_seg_len;
+ }
+
+ tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: DMA mapping failed\n", __func__);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ tx_desc.addr = skb->data;
+ tx_desc.len = skb_headlen(skb);
+
+ skb_tx_timestamp(skb);
+ netdev_sent_queue(priv->ndev, skb->len);
+
+ netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+ netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
+
+ return NETDEV_TX_OK;
+}
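+
+/* Example (illustration, not part of the patch): the head/tail occupancy
+ * computed above is equivalent to a single modular expression,
+ *
+ *	filled = (head - tail + DESC_NUM) % DESC_NUM;
+ *
+ * e.g. head = 1, tail = 30, DESC_NUM = 32 gives filled = 3, matching the
+ * wrapped branch (1 + 32 - 30).
+ */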
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ struct netsec_desc *desc;
+ u16 idx;
+
+ if (!dring->vaddr || !dring->desc)
+ return;
+
+ for (idx = 0; idx < DESC_NUM; idx++) {
+ desc = &dring->desc[idx];
+ if (!desc->addr)
+ continue;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ dev_kfree_skb(desc->skb);
+ }
+
+ memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
+ memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
+
+ dring->head = 0;
+ dring->tail = 0;
+ dring->pkt_cnt = 0;
+}
+
+static void netsec_free_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+
+ if (dring->vaddr) {
+ dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ dring->vaddr, dring->desc_dma);
+ dring->vaddr = NULL;
+ }
+
+ kfree(dring->desc);
+ dring->desc = NULL;
+}
+
+static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ int ret = 0;
+
+ dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
+ if (!dring->vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+ if (!dring->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+err:
+ netsec_free_dring(priv, id);
+
+ return ret;
+}
+
+static int netsec_setup_rx_dring(struct netsec_priv *priv)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ int n;
+
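+	/* buffer length: MTU plus 22 bytes of slack; presumably ETH_HLEN (14)
+	 * + FCS (4) + VLAN tag (4)
+	 */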
+ desc.len = priv->ndev->mtu + 22;
+
+ for (n = 0; n < DESC_NUM; n++) {
+ skb = netsec_alloc_skb(priv, &desc);
+ if (!skb) {
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ return -ENOMEM;
+ }
+ netsec_set_rx_de(priv, dring, n, &desc, skb);
+ }
+
+ return 0;
+}
+
+static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
+ u32 addr_h, u32 addr_l, u32 size)
+{
+ u64 base = (u64)addr_h << 32 | addr_l;
+ void __iomem *ucode;
+ u32 i;
+
+ ucode = ioremap(base, size * sizeof(u32));
+ if (!ucode)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++)
+ netsec_write(priv, reg, readl(ucode + i * 4));
+
+ iounmap(ucode);
+ return 0;
+}
+
+static int netsec_netdev_load_microcode(struct netsec_priv *priv)
+{
+ u32 addr_h, addr_l, size;
+ int err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = 0;
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ return 0;
+}
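+
+/* Example (illustration, not part of the patch): each microcode region is
+ * described in the EEPROM by a split 64-bit bus address that the loader
+ * reassembles as
+ *
+ *	u64 base = (u64)addr_h << 32 | addr_l;
+ *
+ * so addr_h = 0x1, addr_l = 0x80000000 yields base = 0x180000000.
+ */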
+
+static int netsec_reset_hardware(struct netsec_priv *priv)
+{
+ u32 value;
+ int err;
+
+ /* stop DMA engines */
+ if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
+ netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+
+ while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+
+ while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+ }
+
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
+ netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
+
+ while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
+ cpu_relax();
+
+ /* set desc_start addr */
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+
+ /* set normal tx dring ring config */
+ netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+ netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+
+ err = netsec_netdev_load_microcode(priv);
+ if (err) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: failed to load microcode (%d)\n", __func__, err);
+ return err;
+ }
+
+ /* start DMA engines */
+ netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
+ netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
+
+ usleep_range(1000, 2000);
+
+ if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
+ netif_err(priv, probe, priv->ndev,
+ "microengine start failed\n");
+ return -ENXIO;
+ }
+ netsec_write(priv, NETSEC_REG_TOP_STATUS,
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
+
+ value = NETSEC_PKT_CTRL_REG_MODE_NRM;
+ if (priv->ndev->mtu > ETH_DATA_LEN)
+ value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
+
+ /* change to normal mode */
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
+ netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
+
+ while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
+ NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
+ cpu_relax();
+
+ /* clear any pending EMPTY/ERR irq status */
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
+
+ /* Disable TX & RX intr */
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+
+ return 0;
+}
+
+static int netsec_start_gmac(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+ int ret;
+
+ if (phydev->speed != SPEED_1000)
+ value = (NETSEC_GMAC_MCR_REG_CST |
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_RESET))
+ return -ETIMEDOUT;
+
+	/* wait for the soft reset to complete */
+ usleep_range(1000, 5000);
+
+ ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
+ if (ret)
+ return ret;
+ if (value & NETSEC_GMAC_BMR_REG_SWR)
+ return -EAGAIN;
+
+ netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
+ return -ETIMEDOUT;
+
+ netsec_write(priv, MAC_REG_DESC_INIT, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
+ return -ETIMEDOUT;
+
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_RDLAR,
+ NETSEC_GMAC_RDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_TDLAR,
+ NETSEC_GMAC_TDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_update_to_phy_state(priv);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+
+ value |= NETSEC_GMAC_OMR_REG_SR;
+ value |= NETSEC_GMAC_OMR_REG_ST;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
+
+ if (netsec_mac_write(priv, GMAC_REG_OMR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_stop_gmac(struct netsec_priv *priv)
+{
+ u32 value;
+ int ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+ value &= ~NETSEC_GMAC_OMR_REG_SR;
+ value &= ~NETSEC_GMAC_OMR_REG_ST;
+
+ /* disable all interrupts */
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ return netsec_mac_write(priv, GMAC_REG_OMR, value);
+}
+
+static void netsec_phy_adjust_link(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ if (ndev->phydev->link)
+ netsec_start_gmac(priv);
+ else
+ netsec_stop_gmac(priv);
+
+ phy_print_status(ndev->phydev);
+}
+
+static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
+{
+ struct netsec_priv *priv = dev_id;
+ u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
+ unsigned long flags;
+
+	/* ack pending Tx/Rx interrupt status */
+ if (status & NETSEC_IRQ_TX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
+ }
+ if (status & NETSEC_IRQ_RX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int netsec_netdev_open(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ pm_runtime_get_sync(priv->dev);
+
+ ret = netsec_setup_rx_dring(priv);
+ if (ret) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: fail setup ring\n", __func__);
+ goto err1;
+ }
+
+ ret = request_irq(priv->ndev->irq, netsec_irq_handler,
+ IRQF_SHARED, "netsec", priv);
+ if (ret) {
+ netif_err(priv, drv, priv->ndev, "request_irq failed\n");
+ goto err2;
+ }
+
+ if (dev_of_node(priv->dev)) {
+ if (!of_phy_connect(priv->ndev, priv->phy_np,
+ netsec_phy_adjust_link, 0,
+ priv->phy_interface)) {
+ netif_err(priv, link, priv->ndev, "missing PHY\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+ } else {
+ ret = phy_connect_direct(priv->ndev, priv->phydev,
+ netsec_phy_adjust_link,
+ priv->phy_interface);
+ if (ret) {
+ netif_err(priv, link, priv->ndev,
+ "phy_connect_direct() failed (%d)\n", ret);
+ goto err3;
+ }
+ }
+
+ phy_start(ndev->phydev);
+
+ netsec_start_gmac(priv);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ /* Enable RX intr. */
+ netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+
+ return 0;
+err3:
+ free_irq(priv->ndev->irq, priv);
+err2:
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+err1:
+ pm_runtime_put_sync(priv->dev);
+ return ret;
+}
+
+static int netsec_netdev_stop(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+
+ napi_disable(&priv->napi);
+
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+ netsec_stop_gmac(priv);
+
+ free_irq(priv->ndev->irq, priv);
+
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+
+ pm_runtime_put_sync(priv->dev);
+
+ return 0;
+}
+
+static int netsec_netdev_init(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
+ if (ret)
+ return ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
+ if (ret)
+ goto err1;
+
+ ret = netsec_reset_hardware(priv);
+ if (ret)
+ goto err2;
+
+ return 0;
+err2:
+ netsec_free_dring(priv, NETSEC_RING_RX);
+err1:
+ netsec_free_dring(priv, NETSEC_RING_TX);
+ return ret;
+}
+
+static void netsec_netdev_uninit(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netsec_free_dring(priv, NETSEC_RING_RX);
+ netsec_free_dring(priv, NETSEC_RING_TX);
+}
+
+static int netsec_netdev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
+
+ return 0;
+}
+
+static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
+ int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops netsec_netdev_ops = {
+ .ndo_init = netsec_netdev_init,
+ .ndo_uninit = netsec_netdev_uninit,
+ .ndo_open = netsec_netdev_open,
+ .ndo_stop = netsec_netdev_stop,
+ .ndo_start_xmit = netsec_netdev_start_xmit,
+ .ndo_set_features = netsec_netdev_set_features,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = netsec_netdev_ioctl,
+};
+
+static int netsec_of_probe(struct platform_device *pdev,
+ struct netsec_priv *priv)
+{
+ priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_np) {
+ dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk not found\n");
+ return PTR_ERR(priv->clk);
+ }
+ priv->freq = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static int netsec_acpi_probe(struct platform_device *pdev,
+ struct netsec_priv *priv, u32 *phy_addr)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "missing required property 'phy-channel'\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev,
+ "socionext,phy-clock-frequency",
+ &priv->freq);
+ if (ret)
+ dev_err(&pdev->dev,
+ "missing required property 'socionext,phy-clock-frequency'\n");
+ return ret;
+}
+
+static void netsec_unregister_mdio(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!dev_of_node(priv->dev) && phydev) {
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
+
+ mdiobus_unregister(priv->mii_bus);
+}
+
+static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+{
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
+ bus->priv = priv;
+ bus->name = "SNI NETSEC MDIO";
+ bus->read = netsec_phy_read;
+ bus->write = netsec_phy_write;
+ bus->parent = priv->dev;
+ priv->mii_bus = bus;
+
+ if (dev_of_node(priv->dev)) {
+ struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
+
+ mdio_node = of_get_child_by_name(parent, "mdio");
+ if (mdio_node) {
+ parent = mdio_node;
+ } else {
+			/* older f/w doesn't populate the mdio subnode;
+			 * fall back to the parent node and allow a relaxed
+			 * f/w upgrade in due time.
+			 */
+ dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
+ }
+
+ ret = of_mdiobus_register(bus, parent);
+ of_node_put(mdio_node);
+
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+ } else {
+ /* Mask out all PHYs from auto probing. */
+ bus->phy_mask = ~0;
+ ret = mdiobus_register(bus);
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+
+ priv->phydev = get_phy_device(bus, phy_addr, false);
+ if (IS_ERR(priv->phydev)) {
+ ret = PTR_ERR(priv->phydev);
+ dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+ priv->phydev = NULL;
+ return -ENODEV;
+ }
+
+ ret = phy_device_register(priv->phydev);
+ if (ret) {
+ mdiobus_unregister(bus);
+ dev_err(priv->dev,
+ "phy_device_register err(%d)\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+static int netsec_probe(struct platform_device *pdev)
+{
+ struct resource *mmio_res, *eeprom_res, *irq_res;
+ u8 *mac, macbuf[ETH_ALEN];
+ struct netsec_priv *priv;
+ u32 hw_ver, phy_addr = 0;
+ struct net_device *ndev;
+ int ret;
+
+ mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mmio_res) {
+ dev_err(&pdev->dev, "No MMIO resource found.\n");
+ return -ENODEV;
+ }
+
+ eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!eeprom_res) {
+ dev_info(&pdev->dev, "No EEPROM resource found.\n");
+ return -ENODEV;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "No IRQ resource found.\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ spin_lock_init(&priv->reglock);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, priv);
+ ndev->irq = irq_res->start;
+ priv->dev = &pdev->dev;
+ priv->ndev = ndev;
+
+ priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+ NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+ priv->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (priv->phy_interface < 0) {
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+ resource_size(mmio_res));
+ if (!priv->ioaddr) {
+ dev_err(&pdev->dev, "devm_ioremap() failed\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
+ resource_size(eeprom_res));
+ if (!priv->eeprom_base) {
+ dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
+ if (mac)
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ if (priv->eeprom_base &&
+ (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ void __iomem *macp = priv->eeprom_base +
+ NETSEC_EEPROM_MAC_ADDRESS;
+
+ ndev->dev_addr[0] = readb(macp + 3);
+ ndev->dev_addr[1] = readb(macp + 2);
+ ndev->dev_addr[2] = readb(macp + 1);
+ ndev->dev_addr[3] = readb(macp + 0);
+ ndev->dev_addr[4] = readb(macp + 7);
+ ndev->dev_addr[5] = readb(macp + 6);
+ }
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ if (dev_of_node(&pdev->dev))
+ ret = netsec_of_probe(pdev, priv);
+ else
+ ret = netsec_acpi_probe(pdev, priv, &phy_addr);
+ if (ret)
+ goto free_ndev;
+
+ if (!priv->freq) {
+ dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ /* default for throughput */
+ priv->et_coalesce.rx_coalesce_usecs = 500;
+ priv->et_coalesce.rx_max_coalesced_frames = 8;
+ priv->et_coalesce.tx_coalesce_usecs = 500;
+ priv->et_coalesce.tx_max_coalesced_frames = 8;
+
+ ret = device_property_read_u32(&pdev->dev, "max-frame-size",
+ &ndev->max_mtu);
+ if (ret < 0)
+ ndev->max_mtu = ETH_DATA_LEN;
+
+	/* hold a runtime PM reference for the rest of probe; open/close take their own */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
+ /* this driver only supports F_TAIKI style NETSEC */
+ if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
+ NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
+ ret = -ENODEV;
+ goto pm_disable;
+ }
+
+ dev_info(&pdev->dev, "hardware revision %d.%d\n",
+ hw_ver >> 16, hw_ver & 0xffff);
+
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+
+ ndev->netdev_ops = &netsec_netdev_ops;
+ ndev->ethtool_ops = &netsec_ethtool_ops;
+
+ ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->hw_features = ndev->features;
+
+ priv->rx_cksum_offload_flag = true;
+
+ ret = netsec_register_mdio(priv, phy_addr);
+ if (ret)
+ goto unreg_napi;
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+ dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ netif_err(priv, probe, ndev, "register_netdev() failed\n");
+ goto unreg_mii;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+
+unreg_mii:
+ netsec_unregister_mdio(priv);
+unreg_napi:
+ netif_napi_del(&priv->napi);
+pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+free_ndev:
+ free_netdev(ndev);
+ dev_err(&pdev->dev, "init failed\n");
+
+ return ret;
+}
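+
+/* Example (illustration, not part of the patch): the EEPROM MAC reads in
+ * netsec_probe() imply the address is stored as two little-endian 32-bit
+ * words, i.e. address bytes 0..5 live at EEPROM offsets 3, 2, 1, 0, 7, 6
+ * respectively (layout inferred from the code, not from documentation).
+ */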
+
+static int netsec_remove(struct platform_device *pdev)
+{
+ struct netsec_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+
+ netsec_unregister_mdio(priv);
+
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int netsec_runtime_suspend(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, 0);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int netsec_runtime_resume(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ clk_prepare_enable(priv->clk);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
+ NETSEC_CLK_EN_REG_DOM_C |
+ NETSEC_CLK_EN_REG_DOM_G);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops netsec_pm_ops = {
+ SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
+};
+
+static const struct of_device_id netsec_dt_ids[] = {
+ { .compatible = "socionext,synquacer-netsec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, netsec_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id netsec_acpi_ids[] = {
+ { "SCX0001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
+#endif
+
+static struct platform_driver netsec_driver = {
+ .probe = netsec_probe,
+ .remove = netsec_remove,
+ .driver = {
+ .name = "netsec",
+ .pm = &netsec_pm_ops,
+ .of_match_table = netsec_dt_ids,
+ .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
+ },
+};
+module_platform_driver(netsec_driver);
+
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("NETSEC Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
new file mode 100644
index 000000000000..111e7ca9df56
--- /dev/null
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sni_ave.c - Socionext UniPhier AVE Ethernet driver
+ * Copyright 2014 Panasonic Corporation
+ * Copyright 2015-2017 Socionext Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+
+/* General Register Group */
+#define AVE_IDR 0x000 /* ID */
+#define AVE_VR 0x004 /* Version */
+#define AVE_GRR 0x008 /* Global Reset */
+#define AVE_CFGR 0x00c /* Configuration */
+
+/* Interrupt Register Group */
+#define AVE_GIMR 0x100 /* Global Interrupt Mask */
+#define AVE_GISR 0x104 /* Global Interrupt Status */
+
+/* MAC Register Group */
+#define AVE_TXCR 0x200 /* TX Setup */
+#define AVE_RXCR 0x204 /* RX Setup */
+#define AVE_RXMAC1R 0x208 /* MAC address (lower) */
+#define AVE_RXMAC2R 0x20c /* MAC address (upper) */
+#define AVE_MDIOCTR 0x214 /* MDIO Control */
+#define AVE_MDIOAR 0x218 /* MDIO Address */
+#define AVE_MDIOWDR 0x21c /* MDIO Data */
+#define AVE_MDIOSR 0x220 /* MDIO Status */
+#define AVE_MDIORDR 0x224 /* MDIO Rd Data */
+
+/* Descriptor Control Register Group */
+#define AVE_DESCC 0x300 /* Descriptor Control */
+#define AVE_TXDC 0x304 /* TX Descriptor Configuration */
+#define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */
+#define AVE_IIRQC 0x34c /* Interval IRQ Control */
+
+/* Packet Filter Register Group */
+#define AVE_PKTF_BASE 0x800 /* PF Base Address */
+#define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */
+#define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */
+#define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */
+#define AVE_PFEN 0xffc /* Packet Filter Enable */
+#define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40)
+#define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8)
+#define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4)
+#define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4)
+
+/* 64bit descriptor memory */
+#define AVE_DESC_SIZE_64 12 /* Descriptor Size */
+
+#define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */
+#define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */
+#define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */
+
+/* 32bit descriptor memory */
+#define AVE_DESC_SIZE_32 8 /* Descriptor Size */
+
+#define AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */
+#define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */
+#define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */
+
+/* RMII Bridge Register Group */
+#define AVE_RSTCTRL 0x8028 /* Reset control */
+#define AVE_RSTCTRL_RMIIRST BIT(16)
+#define AVE_LINKSEL 0x8034 /* Link speed setting */
+#define AVE_LINKSEL_100M BIT(0)
+
+/* AVE_GRR */
+#define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */
+#define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */
+#define AVE_GRR_GRST BIT(0) /* Reset all MAC */
+
+/* AVE_CFGR */
+#define AVE_CFGR_FLE BIT(31) /* Filter Function */
+#define AVE_CFGR_CHE BIT(30) /* Checksum Function */
+#define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */
+#define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */
+
+/* AVE_GISR (common with GIMR) */
+#define AVE_GI_PHY BIT(24) /* PHY interrupt */
+#define AVE_GI_TX BIT(16) /* Tx complete */
+#define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */
+#define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */
+#define AVE_GI_RXDROP BIT(6) /* Drop packet */
+#define AVE_GI_RXIINT BIT(5) /* Interval interrupt */
+
+/* AVE_TXCR */
+#define AVE_TXCR_FLOCTR BIT(18) /* Flow control */
+#define AVE_TXCR_TXSPD_1G BIT(17)
+#define AVE_TXCR_TXSPD_100 BIT(16)
+
+/* AVE_RXCR */
+#define AVE_RXCR_RXEN BIT(30) /* Rx enable */
+#define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */
+#define AVE_RXCR_FLOCTR BIT(21) /* Flow control */
+#define AVE_RXCR_AFEN BIT(19) /* MAC address filter */
+#define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */
+#define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0)
+
+/* AVE_MDIOCTR */
+#define AVE_MDIOCTR_RREQ BIT(3) /* Read request */
+#define AVE_MDIOCTR_WREQ BIT(2) /* Write request */
+
+/* AVE_MDIOSR */
+#define AVE_MDIOSR_STS BIT(0) /* access status */
+
+/* AVE_DESCC */
+#define AVE_DESCC_STATUS_MASK GENMASK(31, 16)
+#define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */
+#define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */
+#define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */
+
+/* AVE_TXDC */
+#define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */
+#define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */
+#define AVE_TXDC_ADDR_START 0
+
+/* AVE_RXDC0 */
+#define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */
+#define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */
+#define AVE_RXDC0_ADDR_START 0
+
+/* AVE_IIRQC */
+#define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */
+#define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */
+
+/* Command status for descriptor */
+#define AVE_STS_OWN BIT(31) /* Descriptor ownership */
+#define AVE_STS_INTR BIT(29) /* Request for interrupt */
+#define AVE_STS_OK BIT(27) /* Normal transmit */
+/* TX */
+#define AVE_STS_NOCSUM		BIT(28)	/* Don't use HW checksum */
+#define AVE_STS_1ST BIT(26) /* Head of buffer chain */
+#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */
+#define AVE_STS_OWC		BIT(21)	/* Out of window, late collision */
+#define AVE_STS_EC BIT(20) /* Excess collision occurred */
+#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0)
+/* RX */
+#define AVE_STS_CSSV BIT(21) /* Checksum check performed */
+#define AVE_STS_CSER BIT(20) /* Checksum error detected */
+#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0)
+
+/* Packet filter */
+#define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0))
+#define AVE_PFMBYTE_MASK1 GENMASK(25, 0)
+#define AVE_PFMBIT_MASK GENMASK(15, 0)
+
+#define AVE_PF_SIZE		17	/* Total number of packet filters */
+#define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filters */
+
+#define AVE_PFNUM_FILTER 0 /* No.0 */
+#define AVE_PFNUM_UNICAST 1 /* No.1 */
+#define AVE_PFNUM_BROADCAST 2 /* No.2 */
+#define AVE_PFNUM_MULTICAST 11 /* No.11-17 */
+
+/* NETIF Message control */
+#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* Parameter for descriptor */
+#define AVE_NR_TXDESC 32 /* Tx descriptor */
+#define AVE_NR_RXDESC 64 /* Rx descriptor */
+
+#define AVE_DESC_OFS_CMDSTS 0
+#define AVE_DESC_OFS_ADDRL 4
+#define AVE_DESC_OFS_ADDRU 8
+
+/* Parameter for ethernet frame */
+#define AVE_MAX_ETHFRAME 1518
+
+/* Parameter for interrupt */
+#define AVE_INTM_COUNT 20
+#define AVE_FORCE_TXINTCNT 1
+
+#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit)
+
+enum desc_id {
+ AVE_DESCID_RX,
+ AVE_DESCID_TX,
+};
+
+enum desc_state {
+ AVE_DESC_RX_PERMIT,
+ AVE_DESC_RX_SUSPEND,
+ AVE_DESC_START,
+ AVE_DESC_STOP,
+};
+
+struct ave_desc {
+ struct sk_buff *skbs;
+ dma_addr_t skbs_dma;
+ size_t skbs_dmalen;
+};
+
+struct ave_desc_info {
+	u32	ndesc;		/* number of descriptors */
+	u32	daddr;		/* start address of descriptor memory */
+	u32	proc_idx;	/* index of the packet being processed */
+	u32	done_idx;	/* index of the last processed packet */
+	struct ave_desc *desc;	/* skb info for each descriptor */
+};
+
+struct ave_soc_data {
+ bool is_desc_64bit;
+};
+
+struct ave_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 dropped;
+ u64 collisions;
+ u64 fifo_errors;
+};
+
+struct ave_private {
+ void __iomem *base;
+ int irq;
+ int phy_id;
+ unsigned int desc_size;
+ u32 msg_enable;
+ struct clk *clk;
+ struct reset_control *rst;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ struct mii_bus *mdio;
+
+ /* stats */
+ struct ave_stats stats_rx;
+ struct ave_stats stats_tx;
+
+ /* NAPI support */
+ struct net_device *ndev;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+
+ /* descriptor */
+ struct ave_desc_info rx;
+ struct ave_desc_info tx;
+
+ /* flow control */
+ int pause_auto;
+ int pause_rx;
+ int pause_tx;
+
+ const struct ave_soc_data *data;
+};
+
+static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
+ int offset)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 addr;
+
+ addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+ + entry * priv->desc_size + offset;
+
+ return readl(priv->base + addr);
+}
+
+static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
+ int entry)
+{
+ return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
+}
+
+static void ave_desc_write(struct net_device *ndev, enum desc_id id,
+ int entry, int offset, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 addr;
+
+ addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+ + entry * priv->desc_size + offset;
+
+ writel(val, priv->base + addr);
+}
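+
+/* Example (illustration, not part of the patch): descriptors live in
+ * on-chip memory and are addressed as ring base + entry * descriptor size
+ * + field offset, so with 64-bit descriptors (12 bytes) the upper address
+ * word of entry 5 sits at daddr + 5 * 12 + AVE_DESC_OFS_ADDRU = daddr + 68.
+ */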
+
+static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
+ int entry, u32 val)
+{
+ ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
+}
+
+static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
+ int entry, dma_addr_t paddr)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
+ lower_32_bits(paddr));
+ if (IS_DESC_64BIT(priv))
+ ave_desc_write(ndev, id,
+ entry, AVE_DESC_OFS_ADDRU,
+ upper_32_bits(paddr));
+}
+
+static u32 ave_irq_disable_all(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 ret;
+
+ ret = readl(priv->base + AVE_GIMR);
+ writel(0, priv->base + AVE_GIMR);
+
+ return ret;
+}
+
+static void ave_irq_restore(struct net_device *ndev, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ writel(val, priv->base + AVE_GIMR);
+}
+
+static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
+ writel(bitflag, priv->base + AVE_GISR);
+}
+
+static void ave_hw_write_macaddr(struct net_device *ndev,
+ const unsigned char *mac_addr,
+ int reg1, int reg2)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ writel(mac_addr[0] | mac_addr[1] << 8 |
+ mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
+ writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
+}
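+
+/* Example (illustration, not part of the patch): ave_hw_write_macaddr()
+ * packs the address little-endian, so 00:11:22:33:44:55 is written as
+ * reg1 = 0x33221100 and reg2 = 0x00005544.
+ */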
+
+static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 major, minor, vr;
+
+ vr = readl(priv->base + AVE_VR);
+ major = (vr & GENMASK(15, 8)) >> 8;
+ minor = (vr & GENMASK(7, 0));
+ snprintf(buf, len, "v%u.%u", major, minor);
+}
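+
+/* Example (illustration, not part of the patch): AVE_VR packs the major
+ * version in bits [15:8] and the minor version in bits [7:0], so
+ * vr = 0x0123 reads back as "v1.35".
+ */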
+
+static void ave_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct device *dev = ndev->dev.parent;
+
+ strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+ ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
+}
+
+static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ priv->msg_enable = val;
+}
+
+static void ave_ethtool_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (ndev->phydev)
+ phy_ethtool_get_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (!ndev->phydev ||
+ (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
+ return -EOPNOTSUPP;
+
+ ret = phy_ethtool_set_wol(ndev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
+
+ return ret;
+}
+
+static void ave_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ pause->autoneg = priv->pause_auto;
+ pause->rx_pause = priv->pause_rx;
+ pause->tx_pause = priv->pause_tx;
+}
+
+static int ave_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -EINVAL;
+
+ priv->pause_auto = pause->autoneg;
+ priv->pause_rx = pause->rx_pause;
+ priv->pause_tx = pause->tx_pause;
+
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (pause->rx_pause)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ if (pause->tx_pause)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ phy_start_aneg(phydev);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops ave_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_drvinfo = ave_ethtool_get_drvinfo,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ave_ethtool_get_msglevel,
+ .set_msglevel = ave_ethtool_set_msglevel,
+ .get_wol = ave_ethtool_get_wol,
+ .set_wol = ave_ethtool_set_wol,
+ .get_pauseparam = ave_ethtool_get_pauseparam,
+ .set_pauseparam = ave_ethtool_set_pauseparam,
+};
+
+static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
+{
+ struct net_device *ndev = bus->priv;
+ struct ave_private *priv;
+ u32 mdioctl, mdiosr;
+ int ret;
+
+ priv = netdev_priv(ndev);
+
+ /* write address */
+ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+ /* read request */
+ mdioctl = readl(priv->base + AVE_MDIOCTR);
+ writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
+ priv->base + AVE_MDIOCTR);
+
+ ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+ !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+ if (ret) {
+ netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
+ phyid, regnum);
+ return ret;
+ }
+
+ return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
+}
+
+static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
+ u16 val)
+{
+ struct net_device *ndev = bus->priv;
+ struct ave_private *priv;
+ u32 mdioctl, mdiosr;
+ int ret;
+
+ priv = netdev_priv(ndev);
+
+ /* write address */
+ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+ /* write data */
+ writel(val, priv->base + AVE_MDIOWDR);
+
+ /* write request */
+ mdioctl = readl(priv->base + AVE_MDIOCTR);
+ writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
+ priv->base + AVE_MDIOCTR);
+
+ ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+ !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+ if (ret)
+ netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
+ phyid, regnum);
+
+ return ret;
+}
+
+static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
+ void *ptr, size_t len, enum dma_data_direction dir,
+ dma_addr_t *paddr)
+{
+ dma_addr_t map_addr;
+
+ map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
+ return -ENOMEM;
+
+ desc->skbs_dma = map_addr;
+ desc->skbs_dmalen = len;
+ *paddr = map_addr;
+
+ return 0;
+}
+
+static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
+ enum dma_data_direction dir)
+{
+ if (!desc->skbs_dma)
+ return;
+
+ dma_unmap_single(ndev->dev.parent,
+ desc->skbs_dma, desc->skbs_dmalen, dir);
+ desc->skbs_dma = 0;
+}
+
+/* Prepare Rx descriptor and memory */
+static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = priv->rx.desc[entry].skbs;
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(ndev,
+ AVE_MAX_ETHFRAME);
+ if (!skb) {
+ netdev_err(ndev, "can't allocate skb for Rx\n");
+ return -ENOMEM;
+ }
+ }
+
+	/* temporarily disable the descriptor via cmdsts while setting it up */
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+ AVE_STS_INTR | AVE_STS_OWN);
+
+	/* map Rx buffer
+	 * The Rx buffer handed to the Rx descriptor has two restrictions:
+	 * - the buffer address must be 4-byte aligned
+	 * - the buffer must begin with 2 bytes of headroom; data is written
+	 *   from (buffer + 2)
+	 * To satisfy both, map from the buffer pointer rewound by the
+	 * NET_IP_ALIGN headroom that netdev_alloc_skb_ip_align() reserved,
+	 * and widen the map size by NET_IP_ALIGN accordingly.
+	 */
+ ret = ave_dma_map(ndev, &priv->rx.desc[entry],
+ skb->data - NET_IP_ALIGN,
+ AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+ DMA_FROM_DEVICE, &paddr);
+ if (ret) {
+ netdev_err(ndev, "can't map skb for Rx\n");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ priv->rx.desc[entry].skbs = skb;
+
+ /* set buffer pointer */
+ ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
+
+ /* set enable to cmdsts */
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+ AVE_STS_INTR | AVE_MAX_ETHFRAME);
+
+ return ret;
+}
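+
+/* Example (illustration, not part of the patch): netdev_alloc_skb_ip_align()
+ * reserves NET_IP_ALIGN (2) bytes of headroom so the IP header ends up
+ * 4-byte aligned; the mapping above rewinds those 2 bytes and widens the
+ * length,
+ *
+ *	dma_ptr = skb->data - NET_IP_ALIGN;
+ *	dma_len = AVE_MAX_ETHFRAME + NET_IP_ALIGN;
+ *
+ * which satisfies both hardware restrictions at once.
+ */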
+
+/* Switch state of descriptor */
+static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int ret = 0;
+ u32 val;
+
+ switch (state) {
+ case AVE_DESC_START:
+ writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
+ break;
+
+ case AVE_DESC_STOP:
+ writel(0, priv->base + AVE_DESCC);
+ if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
+ 150, 15000)) {
+ netdev_err(ndev, "can't stop descriptor\n");
+ ret = -EBUSY;
+ }
+ break;
+
+ case AVE_DESC_RX_SUSPEND:
+ val = readl(priv->base + AVE_DESCC);
+ val |= AVE_DESCC_RDSTP;
+ val &= ~AVE_DESCC_STATUS_MASK;
+ writel(val, priv->base + AVE_DESCC);
+ if (readl_poll_timeout(priv->base + AVE_DESCC, val,
+ val & (AVE_DESCC_RDSTP << 16),
+ 150, 150000)) {
+ netdev_err(ndev, "can't suspend descriptor\n");
+ ret = -EBUSY;
+ }
+ break;
+
+ case AVE_DESC_RX_PERMIT:
+ val = readl(priv->base + AVE_DESCC);
+ val &= ~AVE_DESCC_RDSTP;
+ val &= ~AVE_DESCC_STATUS_MASK;
+ writel(val, priv->base + AVE_DESCC);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ave_tx_complete(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 proc_idx, done_idx, ndesc, cmdsts;
+ unsigned int nr_freebuf = 0;
+ unsigned int tx_packets = 0;
+ unsigned int tx_bytes = 0;
+
+ proc_idx = priv->tx.proc_idx;
+ done_idx = priv->tx.done_idx;
+ ndesc = priv->tx.ndesc;
+
+ /* free pre-stored skb from done_idx to proc_idx */
+ while (proc_idx != done_idx) {
+ cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
+
+ /* do nothing if owner is HW (==1 for Tx) */
+ if (cmdsts & AVE_STS_OWN)
+ break;
+
+		/* check Tx status and update statistics */
+ if (cmdsts & AVE_STS_OK) {
+ tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
+ /* success */
+ if (cmdsts & AVE_STS_LAST)
+ tx_packets++;
+ } else {
+ /* error */
+ if (cmdsts & AVE_STS_LAST) {
+ priv->stats_tx.errors++;
+ if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
+ priv->stats_tx.collisions++;
+ }
+ }
+
+ /* release skb */
+ if (priv->tx.desc[done_idx].skbs) {
+ ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
+ DMA_TO_DEVICE);
+ dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
+ priv->tx.desc[done_idx].skbs = NULL;
+ nr_freebuf++;
+ }
+ done_idx = (done_idx + 1) % ndesc;
+ }
+
+ priv->tx.done_idx = done_idx;
+
+ /* update stats */
+ u64_stats_update_begin(&priv->stats_tx.syncp);
+ priv->stats_tx.packets += tx_packets;
+ priv->stats_tx.bytes += tx_bytes;
+ u64_stats_update_end(&priv->stats_tx.syncp);
+
+ /* wake queue for freeing buffer */
+ if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
+ netif_wake_queue(ndev);
+
+ return nr_freebuf;
+}
+
+static int ave_rx_receive(struct net_device *ndev, int num)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ unsigned int rx_packets = 0;
+ unsigned int rx_bytes = 0;
+ u32 proc_idx, done_idx;
+ struct sk_buff *skb;
+ unsigned int pktlen;
+ int restpkt, npkts;
+ u32 ndesc, cmdsts;
+
+ proc_idx = priv->rx.proc_idx;
+ done_idx = priv->rx.done_idx;
+ ndesc = priv->rx.ndesc;
+ restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
+
+ for (npkts = 0; npkts < num; npkts++) {
+		/* we can't receive any more packets, so stop and refill descriptors quickly */
+ if (--restpkt < 0)
+ break;
+
+ cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
+
+ /* do nothing if owner is HW (==0 for Rx) */
+ if (!(cmdsts & AVE_STS_OWN))
+ break;
+
+ if (!(cmdsts & AVE_STS_OK)) {
+ priv->stats_rx.errors++;
+ proc_idx = (proc_idx + 1) % ndesc;
+ continue;
+ }
+
+ pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
+
+ /* get skbuff for rx */
+ skb = priv->rx.desc[proc_idx].skbs;
+ priv->rx.desc[proc_idx].skbs = NULL;
+
+ ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
+
+ skb->dev = ndev;
+ skb_put(skb, pktlen);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_packets++;
+ rx_bytes += pktlen;
+
+ netif_receive_skb(skb);
+
+ proc_idx = (proc_idx + 1) % ndesc;
+ }
+
+ priv->rx.proc_idx = proc_idx;
+
+ /* update stats */
+ u64_stats_update_begin(&priv->stats_rx.syncp);
+ priv->stats_rx.packets += rx_packets;
+ priv->stats_rx.bytes += rx_bytes;
+ u64_stats_update_end(&priv->stats_rx.syncp);
+
+ /* refill the Rx buffers */
+ while (proc_idx != done_idx) {
+ if (ave_rxdesc_prepare(ndev, done_idx))
+ break;
+ done_idx = (done_idx + 1) % ndesc;
+ }
+
+ priv->rx.done_idx = done_idx;
+
+ return npkts;
+}
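+
+/* Example (illustration, not part of the patch): restpkt above counts how
+ * many descriptors may be consumed before the receive index catches up
+ * with the refill index, modulo the ring size:
+ *
+ *	restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
+ *
+ * e.g. proc_idx = 2, done_idx = 60, ndesc = 64 gives restpkt = 5.
+ */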
+
+static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct ave_private *priv;
+ struct net_device *ndev;
+ int num;
+
+ priv = container_of(napi, struct ave_private, napi_rx);
+ ndev = priv->ndev;
+
+ num = ave_rx_receive(ndev, budget);
+ if (num < budget) {
+ napi_complete_done(napi, num);
+
+ /* enable Rx interrupt when NAPI finishes */
+ ave_irq_enable(ndev, AVE_GI_RXIINT);
+ }
+
+ return num;
+}
+
+static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct ave_private *priv;
+ struct net_device *ndev;
+ int num;
+
+ priv = container_of(napi, struct ave_private, napi_tx);
+ ndev = priv->ndev;
+
+ num = ave_tx_complete(ndev);
+ napi_complete(napi);
+
+ /* enable Tx interrupt when NAPI finishes */
+ ave_irq_enable(ndev, AVE_GI_TX);
+
+ return num;
+}
+
+static void ave_global_reset(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* set config register */
+ val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
+ if (!phy_interface_mode_is_rgmii(priv->phy_mode))
+ val |= AVE_CFGR_MII;
+ writel(val, priv->base + AVE_CFGR);
+
+ /* reset RMII register */
+ val = readl(priv->base + AVE_RSTCTRL);
+ val &= ~AVE_RSTCTRL_RMIIRST;
+ writel(val, priv->base + AVE_RSTCTRL);
+
+ /* assert reset */
+ writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
+ msleep(20);
+
+ /* 1st, negate PHY reset only */
+ writel(AVE_GRR_GRST, priv->base + AVE_GRR);
+ msleep(40);
+
+ /* negate reset */
+ writel(0, priv->base + AVE_GRR);
+ msleep(40);
+
+ /* negate RMII register */
+ val = readl(priv->base + AVE_RSTCTRL);
+ val |= AVE_RSTCTRL_RMIIRST;
+ writel(val, priv->base + AVE_RSTCTRL);
+
+ ave_irq_disable_all(ndev);
+}
+
+static void ave_rxfifo_reset(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 rxcr_org;
+
+ /* save and disable MAC receive op */
+ rxcr_org = readl(priv->base + AVE_RXCR);
+ writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
+
+ /* suspend Rx descriptor */
+ ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
+
+	/* drain all received packets before the descriptors restart */
+ ave_rx_receive(ndev, priv->rx.ndesc);
+
+ /* assert reset */
+ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
+ usleep_range(40, 50);
+
+ /* negate reset */
+ writel(0, priv->base + AVE_GRR);
+ usleep_range(10, 20);
+
+ /* negate interrupt status */
+ writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
+
+ /* permit descriptor */
+ ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
+
+	/* restore MAC receive op */
+ writel(rxcr_org, priv->base + AVE_RXCR);
+}
+
+static irqreturn_t ave_irq_handler(int irq, void *netdev)
+{
+ struct net_device *ndev = (struct net_device *)netdev;
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 gimr_val, gisr_val;
+
+ gimr_val = ave_irq_disable_all(ndev);
+
+ /* get interrupt status */
+ gisr_val = readl(priv->base + AVE_GISR);
+
+ /* PHY */
+ if (gisr_val & AVE_GI_PHY)
+ writel(AVE_GI_PHY, priv->base + AVE_GISR);
+
+ /* check exceeding packet */
+ if (gisr_val & AVE_GI_RXERR) {
+ writel(AVE_GI_RXERR, priv->base + AVE_GISR);
+ netdev_err(ndev, "receive a packet exceeding frame buffer\n");
+ }
+
+ gisr_val &= gimr_val;
+ if (!gisr_val)
+ goto exit_isr;
+
+ /* RxFIFO overflow */
+ if (gisr_val & AVE_GI_RXOVF) {
+ priv->stats_rx.fifo_errors++;
+ ave_rxfifo_reset(ndev);
+ goto exit_isr;
+ }
+
+ /* Rx drop */
+ if (gisr_val & AVE_GI_RXDROP) {
+ priv->stats_rx.dropped++;
+ writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
+ }
+
+ /* Rx interval */
+ if (gisr_val & AVE_GI_RXIINT) {
+ napi_schedule(&priv->napi_rx);
+		/* keep the Rx interrupt disabled until NAPI finishes */
+ gimr_val &= ~AVE_GI_RXIINT;
+ }
+
+ /* Tx completed */
+ if (gisr_val & AVE_GI_TX) {
+ napi_schedule(&priv->napi_tx);
+		/* keep the Tx interrupt disabled until NAPI finishes */
+ gimr_val &= ~AVE_GI_TX;
+ }
+
+exit_isr:
+ ave_irq_restore(ndev, gimr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+
+ val = readl(priv->base + AVE_PFEN);
+ writel(val | BIT(entry), priv->base + AVE_PFEN);
+
+ return 0;
+}
+
+static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+
+ val = readl(priv->base + AVE_PFEN);
+ writel(val & ~BIT(entry), priv->base + AVE_PFEN);
+
+ return 0;
+}
+
+static int ave_pfsel_set_macaddr(struct net_device *ndev,
+ unsigned int entry,
+ const unsigned char *mac_addr,
+ unsigned int set_size)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+ if (WARN_ON(set_size > 6))
+ return -EINVAL;
+
+ ave_pfsel_stop(ndev, entry);
+
+ /* set MAC address for the filter */
+ ave_hw_write_macaddr(ndev, mac_addr,
+ AVE_PKTF(entry), AVE_PKTF(entry) + 4);
+
+ /* set byte mask */
+ writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
+ priv->base + AVE_PFMBYTE(entry));
+ writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+ /* set bit mask filter */
+ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+ /* set selector to ring 0 */
+ writel(0, priv->base + AVE_PFSEL(entry));
+
+ /* restart filter */
+ ave_pfsel_start(ndev, entry);
+
+ return 0;
+}
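+
+/* Example (illustration, not part of the patch): a set bit in the byte
+ * mask appears to mean "ignore this byte" (inferred from the promiscuous
+ * path, which masks everything).  For set_size = 6, GENMASK(31, 6) =
+ * 0xffffffc0, so bytes 0-5 of the frame are compared and the rest are
+ * ignored, subject to the fixed holes in AVE_PFMBYTE_MASK0.
+ */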
+
+static void ave_pfsel_set_promisc(struct net_device *ndev,
+ unsigned int entry, u32 rxring)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return;
+
+ ave_pfsel_stop(ndev, entry);
+
+ /* set byte mask */
+ writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
+ writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+ /* set bit mask filter */
+ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+ /* set selector to rxring */
+ writel(rxring, priv->base + AVE_PFSEL(entry));
+
+ ave_pfsel_start(ndev, entry);
+}
+
+static void ave_pfsel_init(struct net_device *ndev)
+{
+ unsigned char bcast_mac[ETH_ALEN];
+ int i;
+
+ eth_broadcast_addr(bcast_mac);
+
+ for (i = 0; i < AVE_PF_SIZE; i++)
+ ave_pfsel_stop(ndev, i);
+
+	/* promiscuous entry, select ring 0 */
+ ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
+
+ /* unicast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+
+ /* broadcast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
+}
+
+static void ave_phy_adjust_link(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u32 val, txcr, rxcr, rxcr_org;
+ u16 rmt_adv = 0, lcl_adv = 0;
+ u8 cap;
+
+ /* set RGMII speed */
+ val = readl(priv->base + AVE_TXCR);
+ val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);
+
+ if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
+ val |= AVE_TXCR_TXSPD_1G;
+ else if (phydev->speed == SPEED_100)
+ val |= AVE_TXCR_TXSPD_100;
+
+ writel(val, priv->base + AVE_TXCR);
+
+ /* set RMII speed (100M/10M only) */
+ if (!phy_interface_is_rgmii(phydev)) {
+ val = readl(priv->base + AVE_LINKSEL);
+ if (phydev->speed == SPEED_10)
+ val &= ~AVE_LINKSEL_100M;
+ else
+ val |= AVE_LINKSEL_100M;
+ writel(val, priv->base + AVE_LINKSEL);
+ }
+
+ /* check current RXCR/TXCR */
+ rxcr = readl(priv->base + AVE_RXCR);
+ txcr = readl(priv->base + AVE_TXCR);
+ rxcr_org = rxcr;
+
+ if (phydev->duplex) {
+ rxcr |= AVE_RXCR_FDUPEN;
+
+ if (phydev->pause)
+ rmt_adv |= LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (cap & FLOW_CTRL_TX)
+ txcr |= AVE_TXCR_FLOCTR;
+ else
+ txcr &= ~AVE_TXCR_FLOCTR;
+ if (cap & FLOW_CTRL_RX)
+ rxcr |= AVE_RXCR_FLOCTR;
+ else
+ rxcr &= ~AVE_RXCR_FLOCTR;
+ } else {
+ rxcr &= ~AVE_RXCR_FDUPEN;
+ rxcr &= ~AVE_RXCR_FLOCTR;
+ txcr &= ~AVE_TXCR_FLOCTR;
+ }
+
+ if (rxcr_org != rxcr) {
+ /* disable Rx mac */
+ writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
+ /* change and enable TX/Rx mac */
+ writel(txcr, priv->base + AVE_TXCR);
+ writel(rxcr, priv->base + AVE_RXCR);
+ }
+
+ phy_print_status(phydev);
+}
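+
+/* Example (illustration, not part of the patch): if both link partners
+ * advertise symmetric pause (lcl_adv = ADVERTISE_PAUSE_CAP,
+ * rmt_adv = LPA_PAUSE_CAP), mii_resolve_flowctrl_fdx() resolves to
+ * FLOW_CTRL_TX | FLOW_CTRL_RX and both FLOCTR bits are set above.
+ */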
+
+static void ave_macaddr_init(struct net_device *ndev)
+{
+ ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
+
+ /* pfsel unicast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+}
+
+static int ave_init(struct net_device *ndev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct ave_private *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct device_node *mdio_np;
+ struct phy_device *phydev;
+ int ret;
+
+	/* enable clk; the hardware is accessed before ndo_open */
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "can't enable clock\n");
+ return ret;
+ }
+ ret = reset_control_deassert(priv->rst);
+ if (ret) {
+ dev_err(dev, "can't deassert reset\n");
+ goto out_clk_disable;
+ }
+
+ ave_global_reset(ndev);
+
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (!mdio_np) {
+ dev_err(dev, "mdio node not found\n");
+ ret = -EINVAL;
+ goto out_reset_assert;
+ }
+ ret = of_mdiobus_register(priv->mdio, mdio_np);
+ of_node_put(mdio_np);
+ if (ret) {
+ dev_err(dev, "failed to register mdiobus\n");
+ goto out_reset_assert;
+ }
+
+ phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
+ if (!phydev) {
+ dev_err(dev, "could not attach to PHY\n");
+ ret = -ENODEV;
+ goto out_mdio_unregister;
+ }
+
+ priv->phydev = phydev;
+
+ phy_ethtool_get_wol(phydev, &wol);
+ device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+
+ if (!phy_interface_is_rgmii(phydev)) {
+ phydev->supported &= ~PHY_GBIT_FEATURES;
+ phydev->supported |= PHY_BASIC_FEATURES;
+ }
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ phy_attached_info(phydev);
+
+ return 0;
+
+out_mdio_unregister:
+ mdiobus_unregister(priv->mdio);
+out_reset_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static void ave_uninit(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ phy_disconnect(priv->phydev);
+ mdiobus_unregister(priv->mdio);
+
+	/* the clock was kept on for hw access after ndo_stop; disable it now */
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int ave_open(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int entry;
+ int ret;
+ u32 val;
+
+ ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
+ ndev);
+ if (ret)
+ return ret;
+
+ priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
+ GFP_KERNEL);
+ if (!priv->tx.desc) {
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
+ GFP_KERNEL);
+ if (!priv->rx.desc) {
+ kfree(priv->tx.desc);
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ /* initialize Tx work and descriptor */
+ priv->tx.proc_idx = 0;
+ priv->tx.done_idx = 0;
+ for (entry = 0; entry < priv->tx.ndesc; entry++) {
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
+ ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
+ }
+ writel(AVE_TXDC_ADDR_START |
+ (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
+ priv->base + AVE_TXDC);
+
+ /* initialize Rx work and descriptor */
+ priv->rx.proc_idx = 0;
+ priv->rx.done_idx = 0;
+ for (entry = 0; entry < priv->rx.ndesc; entry++) {
+ if (ave_rxdesc_prepare(ndev, entry))
+ break;
+ }
+ writel(AVE_RXDC0_ADDR_START |
+ (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
+ priv->base + AVE_RXDC0);
+
+ ave_desc_switch(ndev, AVE_DESC_START);
+
+ ave_pfsel_init(ndev);
+ ave_macaddr_init(ndev);
+
+ /* set Rx configuration */
+	/* full duplex, enable pause drop, enable flow control */
+ val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
+ AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
+ writel(val, priv->base + AVE_RXCR);
+
+ /* set Tx configuration */
+ /* enable flow control, disable loopback */
+ writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
+
+	/* enable timer, clear EN, INTM, and mask interval unit (BSCK) */
+ val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
+ val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
+ writel(val, priv->base + AVE_IIRQC);
+
+ val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+ ave_irq_restore(ndev, val);
+
+ napi_enable(&priv->napi_rx);
+ napi_enable(&priv->napi_tx);
+
+ phy_start(ndev->phydev);
+ phy_start_aneg(ndev->phydev);
+ netif_start_queue(ndev);
+
+ return 0;
+
+out_free_irq:
+ disable_irq(priv->irq);
+ free_irq(priv->irq, ndev);
+
+ return ret;
+}
+
+static int ave_stop(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int entry;
+
+ ave_irq_disable_all(ndev);
+ disable_irq(priv->irq);
+ free_irq(priv->irq, ndev);
+
+ netif_tx_disable(ndev);
+ phy_stop(ndev->phydev);
+ napi_disable(&priv->napi_tx);
+ napi_disable(&priv->napi_rx);
+
+ ave_desc_switch(ndev, AVE_DESC_STOP);
+
+ /* free Tx buffer */
+ for (entry = 0; entry < priv->tx.ndesc; entry++) {
+ if (!priv->tx.desc[entry].skbs)
+ continue;
+
+ ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx.desc[entry].skbs);
+ priv->tx.desc[entry].skbs = NULL;
+ }
+ priv->tx.proc_idx = 0;
+ priv->tx.done_idx = 0;
+
+ /* free Rx buffer */
+ for (entry = 0; entry < priv->rx.ndesc; entry++) {
+ if (!priv->rx.desc[entry].skbs)
+ continue;
+
+ ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx.desc[entry].skbs);
+ priv->rx.desc[entry].skbs = NULL;
+ }
+ priv->rx.proc_idx = 0;
+ priv->rx.done_idx = 0;
+
+ kfree(priv->tx.desc);
+ kfree(priv->rx.desc);
+
+ return 0;
+}
+
+static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 proc_idx, done_idx, ndesc, cmdsts;
+ int ret, freepkt;
+ dma_addr_t paddr;
+
+ proc_idx = priv->tx.proc_idx;
+ done_idx = priv->tx.done_idx;
+ ndesc = priv->tx.ndesc;
+ freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
+
+	/* stop the queue when there are not enough free entries */
+ if (unlikely(freepkt < 1)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+	/* pad short packets to the minimum frame length */
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ priv->stats_tx.dropped++;
+ return NETDEV_TX_OK;
+ }
+
+	/* map Tx buffer
+	 * The buffer attached to the Tx descriptor has no alignment
+	 * restriction.
+	 */
+ ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
+ skb->data, skb->len, DMA_TO_DEVICE, &paddr);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ priv->stats_tx.dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx.desc[proc_idx].skbs = skb;
+
+ ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
+
+ cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
+ (skb->len & AVE_STS_PKTLEN_TX_MASK);
+
+	/* request an interrupt every AVE_FORCE_TXINTCNT packets or when queue is stopped */
+ if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
+ cmdsts |= AVE_STS_INTR;
+
+	/* disable hw checksum calculation when the skb doesn't need it */
+ if (skb->ip_summed == CHECKSUM_NONE ||
+ skb->ip_summed == CHECKSUM_UNNECESSARY)
+ cmdsts |= AVE_STS_NOCSUM;
+
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
+
+ priv->tx.proc_idx = (proc_idx + 1) % ndesc;
+
+ return NETDEV_TX_OK;
+}
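The free-entry computation above deliberately keeps one descriptor slot unused, so that proc_idx == done_idx can only ever mean "ring empty", never "ring full". A minimal userspace sketch of the same modular arithmetic, using a hypothetical 8-entry ring:

#include <stdio.h>

/* Free slots in a circular ring of ndesc descriptors; one slot is
 * sacrificed so that equal indices unambiguously mean "empty".
 */
static unsigned int ring_free(unsigned int proc_idx, unsigned int done_idx,
			      unsigned int ndesc)
{
	return ((done_idx + ndesc - 1) - proc_idx) % ndesc;
}

int main(void)
{
	printf("%u\n", ring_free(0, 0, 8)); /* 7: empty ring, 7 usable slots  */
	printf("%u\n", ring_free(7, 0, 8)); /* 0: full, xmit must stop queue  */
	return 0;
}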
+
+static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+static void ave_set_rx_mode(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *hw_adr;
+ int count, mc_cnt;
+ u32 val;
+
+	/* disable the MAC address filter in promiscuous mode, enable it otherwise */
+ mc_cnt = netdev_mc_count(ndev);
+ val = readl(priv->base + AVE_RXCR);
+ if (ndev->flags & IFF_PROMISC || !mc_cnt)
+ val &= ~AVE_RXCR_AFEN;
+ else
+ val |= AVE_RXCR_AFEN;
+ writel(val, priv->base + AVE_RXCR);
+
+	/* accept all multicast addresses via catch-all filters */
+ if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
+ v4multi_macadr, 1);
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
+ v6multi_macadr, 1);
+ } else {
+		/* stop all multicast filters */
+ for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
+ ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
+
+ /* set multicast addresses */
+ count = 0;
+ netdev_for_each_mc_addr(hw_adr, ndev) {
+ if (count == mc_cnt)
+ break;
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
+ hw_adr->addr, 6);
+ count++;
+ }
+ }
+}
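When the filter table would overflow (or IFF_ALLMULTI is set), the code falls back to two single-byte prefix filters: 0x01 matches the first octet of every IPv4 multicast MAC (01:00:5e:...) and 0x33 that of every IPv6 multicast MAC (33:33:...); the trailing 1 passed to ave_pfsel_set_macaddr() appears to be that match length. A runnable model of the one-byte comparison (the helper name here is illustrative, not driver API):

#include <stdio.h>
#include <string.h>

/* hypothetical software analogue of a length-limited MAC prefix filter */
static int prefix_match(const unsigned char *mac, const unsigned char *flt,
			size_t len)
{
	return !memcmp(mac, flt, len);
}

int main(void)
{
	const unsigned char v4mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const unsigned char flt[6]  = { 0x01 };

	printf("%d\n", prefix_match(v4mc, flt, 1)); /* 1: accepted */
	return 0;
}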
+
+static void ave_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+ stats->rx_packets = priv->stats_rx.packets;
+ stats->rx_bytes = priv->stats_rx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+ stats->tx_packets = priv->stats_tx.packets;
+ stats->tx_bytes = priv->stats_tx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+
+ stats->rx_errors = priv->stats_rx.errors;
+ stats->tx_errors = priv->stats_tx.errors;
+ stats->rx_dropped = priv->stats_rx.dropped;
+ stats->tx_dropped = priv->stats_tx.dropped;
+ stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
+ stats->collisions = priv->stats_tx.collisions;
+}
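ave_get_stats64() is the reader half of the u64_stats seqcount protocol: on 32-bit machines a 64-bit counter cannot be loaded atomically, so the reader retries whenever a writer raced with it. For context, the writer half (which lives in the rx/tx fast paths, not in this hunk) pairs its updates like this — a sketch of the standard idiom, not the driver's literal code:

	u64_stats_update_begin(&priv->stats_rx.syncp);
	priv->stats_rx.packets++;
	priv->stats_rx.bytes += len;
	u64_stats_update_end(&priv->stats_rx.syncp);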
+
+static int ave_set_mac_address(struct net_device *ndev, void *p)
+{
+ int ret = eth_mac_addr(ndev, p);
+
+ if (ret)
+ return ret;
+
+ ave_macaddr_init(ndev);
+
+ return 0;
+}
+
+static const struct net_device_ops ave_netdev_ops = {
+ .ndo_init = ave_init,
+ .ndo_uninit = ave_uninit,
+ .ndo_open = ave_open,
+ .ndo_stop = ave_stop,
+ .ndo_start_xmit = ave_start_xmit,
+ .ndo_do_ioctl = ave_ioctl,
+ .ndo_set_rx_mode = ave_set_rx_mode,
+ .ndo_get_stats64 = ave_get_stats64,
+ .ndo_set_mac_address = ave_set_mac_address,
+};
+
+static int ave_probe(struct platform_device *pdev)
+{
+ const struct ave_soc_data *data;
+ struct device *dev = &pdev->dev;
+ char buf[ETHTOOL_FWVERS_LEN];
+ phy_interface_t phy_mode;
+ struct ave_private *priv;
+ struct net_device *ndev;
+ struct device_node *np;
+ struct resource *res;
+ const void *mac_addr;
+ void __iomem *base;
+ u64 dma_mask;
+ int irq, ret;
+ u32 ave_id;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ np = dev->of_node;
+ phy_mode = of_get_phy_mode(np);
+ if (phy_mode < 0) {
+ dev_err(dev, "phy-mode not found\n");
+ return -EINVAL;
+ }
+ if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
+ phy_mode != PHY_INTERFACE_MODE_RMII &&
+ phy_mode != PHY_INTERFACE_MODE_MII) {
+ dev_err(dev, "phy-mode is invalid\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "IRQ not found\n");
+ return irq;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ndev = alloc_etherdev(sizeof(struct ave_private));
+ if (!ndev) {
+ dev_err(dev, "can't allocate ethernet device\n");
+ return -ENOMEM;
+ }
+
+ ndev->netdev_ops = &ave_netdev_ops;
+ ndev->ethtool_ops = &ave_ethtool_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+ ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+
+ ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+
+	/* if the MAC address is invalid, fall back to a random one */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ }
+
+ priv = netdev_priv(ndev);
+ priv->base = base;
+ priv->irq = irq;
+ priv->ndev = ndev;
+ priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
+ priv->phy_mode = phy_mode;
+ priv->data = data;
+
+ if (IS_DESC_64BIT(priv)) {
+ priv->desc_size = AVE_DESC_SIZE_64;
+ priv->tx.daddr = AVE_TXDM_64;
+ priv->rx.daddr = AVE_RXDM_64;
+ dma_mask = DMA_BIT_MASK(64);
+ } else {
+ priv->desc_size = AVE_DESC_SIZE_32;
+ priv->tx.daddr = AVE_TXDM_32;
+ priv->rx.daddr = AVE_RXDM_32;
+ dma_mask = DMA_BIT_MASK(32);
+ }
+ ret = dma_set_mask(dev, dma_mask);
+ if (ret)
+ goto out_free_netdev;
+
+ priv->tx.ndesc = AVE_NR_TXDESC;
+ priv->rx.ndesc = AVE_NR_RXDESC;
+
+ u64_stats_init(&priv->stats_tx.syncp);
+ u64_stats_init(&priv->stats_rx.syncp);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto out_free_netdev;
+ }
+
+ priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ goto out_free_netdev;
+ }
+
+ priv->mdio = devm_mdiobus_alloc(dev);
+ if (!priv->mdio) {
+ ret = -ENOMEM;
+ goto out_free_netdev;
+ }
+ priv->mdio->priv = ndev;
+ priv->mdio->parent = dev;
+ priv->mdio->read = ave_mdiobus_read;
+ priv->mdio->write = ave_mdiobus_write;
+ priv->mdio->name = "uniphier-mdio";
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+	/* register as a NAPI-supported driver */
+ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+ netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
+ priv->tx.ndesc);
+
+ platform_set_drvdata(pdev, ndev);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "failed to register netdevice\n");
+ goto out_del_napi;
+ }
+
+ /* get ID and version */
+ ave_id = readl(priv->base + AVE_IDR);
+ ave_hw_read_version(ndev, buf, sizeof(buf));
+
+ dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
+ (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
+ (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
+ buf, priv->irq, phy_modes(phy_mode));
+
+ return 0;
+
+out_del_napi:
+ netif_napi_del(&priv->napi_rx);
+ netif_napi_del(&priv->napi_tx);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int ave_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ave_private *priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi_rx);
+ netif_napi_del(&priv->napi_tx);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct ave_soc_data ave_pro4_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_pxs2_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld11_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld20_data = {
+ .is_desc_64bit = true,
+};
+
+static const struct of_device_id of_ave_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-ave4",
+ .data = &ave_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-ave4",
+ .data = &ave_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-ave4",
+ .data = &ave_ld11_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-ave4",
+ .data = &ave_ld20_data,
+ },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_ave_match);
+
+static struct platform_driver ave_driver = {
+ .probe = ave_probe,
+ .remove = ave_remove,
+ .driver = {
+ .name = "ave",
+ .of_match_table = of_ave_match,
+ },
+};
+module_platform_driver(ave_driver);
+
+MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ce2ea2d491ac..2ffe76c0ff74 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -474,7 +474,7 @@ struct mac_device_info;
/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init)(struct mac_device_info *hw, int mtu);
+ void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4404650b32c5..5270d26f0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -40,9 +40,7 @@
#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
#define PRG_ETH0_CLK_M250_DIV_WIDTH 3
-/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
-#define PRG_ETH0_CLK_M25_DIV_SHIFT 10
-#define PRG_ETH0_CLK_M25_DIV_WIDTH 1
+#define PRG_ETH0_RGMII_TX_CLK_EN 10
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
@@ -63,8 +61,11 @@ struct meson8b_dwmac {
struct clk_divider m250_div;
struct clk *m250_div_clk;
- struct clk_divider m25_div;
- struct clk *m25_div_clk;
+ struct clk_fixed_factor fixed_div2;
+ struct clk *fixed_div2_clk;
+
+ struct clk_gate rgmii_tx_en;
+ struct clk *rgmii_tx_en_clk;
u32 tx_delay_ns;
};
@@ -81,7 +82,7 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
writel(data, dwmac->regs + reg);
}
-static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{
struct clk_init_data init;
int i, ret;
@@ -89,11 +90,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
char clk_name[32];
const char *clk_div_parents[1];
const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
- static const struct clk_div_table clk_25m_div_table[] = {
- { .val = 0, .div = 5 },
- { .val = 1, .div = 10 },
- { /* sentinel */ },
- };
/* get the mux parents from DT */
for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
@@ -116,7 +112,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
init.name = clk_name;
init.ops = &clk_mux_ops;
- init.flags = 0;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = mux_parent_names;
init.num_parents = MUX_CLK_NUM_PARENTS;
@@ -144,31 +140,48 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
dwmac->m250_div.hw.init = &init;
- dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
return PTR_ERR(dwmac->m250_div_clk);
- /* create the m25_div */
- snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+ /* create the fixed_div2 */
+ snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev));
init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- init.ops = &clk_divider_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = CLK_SET_RATE_PARENT;
clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
init.parent_names = clk_div_parents;
init.num_parents = ARRAY_SIZE(clk_div_parents);
- dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
- dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
- dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
- dwmac->m25_div.table = clk_25m_div_table;
- dwmac->m25_div.hw.init = &init;
- dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->fixed_div2.mult = 1;
+ dwmac->fixed_div2.div = 2;
+ dwmac->fixed_div2.hw.init = &init;
- dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
- if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
- return PTR_ERR(dwmac->m25_div_clk);
+ dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw);
+ if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk)))
+ return PTR_ERR(dwmac->fixed_div2_clk);
+
+ /* create the rgmii_tx_en */
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en",
+ dev_name(dev));
+ init.ops = &clk_gate_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+ dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+ dwmac->rgmii_tx_en.hw.init = &init;
+
+ dwmac->rgmii_tx_en_clk = devm_clk_register(dev,
+ &dwmac->rgmii_tx_en.hw);
+ if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk)))
+ return PTR_ERR(dwmac->rgmii_tx_en_clk);
return 0;
}
@@ -176,7 +189,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
- unsigned long clk_rate;
u8 tx_dly_val = 0;
switch (dwmac->phy_mode) {
@@ -191,9 +203,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- /* Generate a 25MHz clock for the PHY */
- clk_rate = 25 * 1000 * 1000;
-
/* enable RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
PRG_ETH0_RGMII_MODE);
@@ -204,12 +213,28 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
+
+
+	/* Configure the 125MHz RGMII TX clock; the IP block then adapts
+	 * the output automatically (i.e. without any further register
+	 * writes) based on the line speed: 125MHz for Gbit speeds,
+	 * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s.
+	 */
+ ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to set RGMII TX clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to enable the RGMII TX clock\n");
+ return ret;
+ }
break;
case PHY_INTERFACE_MODE_RMII:
- /* Use the rate of the mux clock for the internal RMII PHY */
- clk_rate = clk_get_rate(dwmac->m250_mux_clk);
-
/* disable RGMII mode -> enables RMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
0);
@@ -231,20 +256,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- ret = clk_prepare_enable(dwmac->m25_div_clk);
- if (ret) {
- dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
- return ret;
- }
-
- ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
- if (ret) {
- clk_disable_unprepare(dwmac->m25_div_clk);
-
- dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
- return ret;
- }
-
/* enable TX_CLK and PHY_REF_CLK generator */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
PRG_ETH0_TX_AND_PHY_REF_CLK);
@@ -294,7 +305,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- ret = meson8b_init_clk(dwmac);
+ ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -311,7 +322,8 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
@@ -322,7 +334,8 @@ static int meson8b_dwmac_remove(struct platform_device *pdev)
{
struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
return stmmac_pltfr_remove(pdev);
}
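The reworked meson8b clock tree is a four-stage chain — m250_sel mux -> m250_div divider -> fixed_div2 (a fixed /2) -> rgmii_tx_en gate — with CLK_SET_RATE_PARENT on each child, so a clk_set_rate() on the leaf gate propagates upward and reprograms m250_div. A back-of-the-envelope check of that propagation (the 2 GHz mux input is an assumed example; real parent rates come from DT):

#include <stdio.h>

int main(void)
{
	unsigned long target   = 125000000UL;	/* RGMII TX clock           */
	unsigned long div2_in  = target * 2;	/* fixed_div2 needs 250 MHz */
	unsigned long mux_rate = 2000000000UL;	/* assumed m250_sel input   */

	printf("m250_div divider = %lu\n", mux_rate / div2_in); /* 8 */
	return 0;
}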
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 9eb7f65d8000..a3fa65b1ca8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -483,7 +483,8 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu)
+static void sun8i_dwmac_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 v;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 8a86340ff2d3..540d21786a43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -25,18 +25,28 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
+#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac_pcs.h"
#include "dwmac1000.h"
-static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac1000_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
+ int mtu = dev->mtu;
/* Configure GMAC core */
value |= GMAC_CORE_INIT;
+ /* Clear ACS bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~GMAC_CONTROL_ACS;
+
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 8ef517356313..91b23f9db31a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -25,15 +25,26 @@
*******************************************************************************/
#include <linux/crc32.h>
+#include <net/dsa.h>
#include <asm/io.h>
#include "dwmac100.h"
-static void dwmac100_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac100_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + MAC_CONTROL);
- writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+ value |= MAC_CORE_INIT;
+
+ /* Clear ASTP bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~MAC_CONTROL_ASTP;
+
+ writel(value, ioaddr + MAC_CONTROL);
#ifdef STMMAC_VLAN_TAG_USED
writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f3ed8f7853eb..ed222b20fcf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,16 +17,26 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <net/dsa.h>
#include "stmmac_pcs.h"
#include "dwmac4.h"
-static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac4_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
+ int mtu = dev->mtu;
value |= GMAC_CORE_INIT;
+ /* Clear ACS bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~GMAC_CONFIG_ACS;
+
if (mtu > 1500)
value |= GMAC_CONFIG_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7e089bf906b4..c728ffa095de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (tx_own)
tdes3 |= TDES3_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
/* When the own bit has to be set for the first frame, all other
 * descriptors for the same frame have to be set up beforehand,
 * to avoid a race condition.
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
if (tx_own)
tdes3 |= TDES3_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
/* When the own bit has to be set for the first frame, all other
 * descriptors for the same frame have to be set up beforehand,
 * to avoid a race condition.
@@ -406,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(p),
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
le32_to_cpu(p->des2), le32_to_cpu(p->des3));
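This hunk and the matching enh_desc.c one below are genuine bug fixes, not cleanups: `is_fs & tx_own` is a bitwise AND, which yields 0 whenever is_fs holds a true-but-even value, so the "write the other descriptors first" path could be skipped. Logical AND states the intent (the call sites appear to pass 0/1 today, so the bug is mostly latent). A runnable illustration:

#include <stdio.h>

int main(void)
{
	int is_fs = 2, tx_own = 1;	/* both logically "true" */

	printf("%d\n", is_fs & tx_own);		/* 0: wrong branch  */
	printf("%d\n", is_fs && tx_own);	/* 1: intended      */
	return 0;
}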
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2a828a312814..6768a25b6aa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -341,7 +341,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (tx_own)
tdes0 |= ETDES0_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
/* When the own bit has to be set for the first frame, all other
 * descriptors for the same frame have to be set up beforehand,
 * to avoid a race condition.
@@ -428,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
u64 x;
x = *(u64 *)ep;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
(unsigned int)x, (unsigned int)(x >> 32),
ep->basic.des2, ep->basic.des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index db4cee57bb24..ebd9e5e00f16 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
u64 x;
x = *(u64 *)p;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
i, (unsigned int)virt_to_phys(p),
(unsigned int)x, (unsigned int)(x >> 32),
p->des2, p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0af0bc4e714..7ad841434ec8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2003,22 +2003,60 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
u32 tx_channel_count = priv->plat->tx_queues_to_use;
- int status;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 channels_to_check = tx_channel_count > rx_channel_count ?
+ tx_channel_count : rx_channel_count;
u32 chan;
+ bool poll_scheduled = false;
+ int status[channels_to_check];
+
+ /* Each DMA channel can be used for rx and tx simultaneously, yet
+ * napi_struct is embedded in struct stmmac_rx_queue rather than in a
+ * stmmac_channel struct.
+ * Because of this, stmmac_poll currently checks (and possibly wakes)
+ * all tx queues rather than just a single tx queue.
+ */
+ for (chan = 0; chan < channels_to_check; chan++)
+ status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
+ &priv->xstats,
+ chan);
- for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ for (chan = 0; chan < rx_channel_count; chan++) {
+ if (likely(status[chan] & handle_rx)) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- status = priv->hw->dma->dma_interrupt(priv->ioaddr,
- &priv->xstats, chan);
- if (likely((status & handle_rx)) || (status & handle_tx)) {
if (likely(napi_schedule_prep(&rx_q->napi))) {
stmmac_disable_dma_irq(priv, chan);
__napi_schedule(&rx_q->napi);
+ poll_scheduled = true;
+ }
+ }
+ }
+
+ /* If we scheduled poll, we already know that tx queues will be checked.
+ * If we didn't schedule poll, see if any DMA channel (used by tx) has a
+ * completed transmission, if so, call stmmac_poll (once).
+ */
+ if (!poll_scheduled) {
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (status[chan] & handle_tx) {
+ /* It doesn't matter what rx queue we choose
+ * here. We use 0 since it always exists.
+ */
+ struct stmmac_rx_queue *rx_q =
+ &priv->rx_queue[0];
+
+ if (likely(napi_schedule_prep(&rx_q->napi))) {
+ stmmac_disable_dma_irq(priv, chan);
+ __napi_schedule(&rx_q->napi);
+ }
+ break;
}
}
+ }
- if (unlikely(status & tx_hard_error_bump_tc)) {
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
(tc <= 256)) {
@@ -2035,7 +2073,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
chan);
priv->xstats.threshold = tc;
}
- } else if (unlikely(status == tx_hard_error)) {
+ } else if (unlikely(status[chan] == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
}
@@ -2489,7 +2527,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->hw, dev->mtu);
+ priv->hw->mac->core_init(priv->hw, dev);
/* Initialize MTL*/
if (priv->synopsys_id >= DWMAC_CORE_4_00)
@@ -3404,9 +3442,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
if (netif_msg_rx_status(priv)) {
netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
p, entry, des);
- if (frame_len > ETH_FRAME_LEN)
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- frame_len, status);
+ netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+ frame_len, status);
}
/* The zero-copy is always used for all the sizes
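The restructured stmmac_dma_interrupt() snapshots every channel's status first and then schedules NAPI at most once. The napi_schedule_prep()/__napi_schedule() pair it leans on is the split form of napi_schedule(): prep atomically claims the NAPI instance (returning false if a poll is already pending), which is what lets the handler mask the DMA interrupt exactly once before handing off. The general shape of the idiom in an ISR:

	if (napi_schedule_prep(&rx_q->napi)) {
		stmmac_disable_dma_irq(priv, chan);	/* mask before handoff  */
		__napi_schedule(&rx_q->napi);		/* poll re-enables later */
	}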
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index d655a4261e98..eb1c6b03c329 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev,
struct sk_buff *skb, bool tx_rx)
{
struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
unsigned char buffer[128];
- unsigned int i, j;
+ unsigned int i;
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
@@ -346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev,
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
+ for (i = 0; i < skb->len; i += 32) {
+ unsigned int len = min(skb->len - i, 32U);
+
+ hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+ buffer, sizeof(buffer), false);
+ netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
}
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
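The rewritten dump loop replaces hand-rolled snprintf() bookkeeping with the kernel's own hexdump helper. For reference, the lib/hexdump.c signature this relies on (rowsize must be 16 or 32; groupsize 1 gives plain byte pairs):

int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
		       int groupsize, char *linebuf, size_t linebuflen,
		       bool ascii);

With rowsize 32 and ascii false, each 32-byte chunk of the skb formats into one line, which is exactly what the loop above emits.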
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a73600dceb8b..3c85a0885f9b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -88,6 +88,7 @@ do { \
#define CPSW_VERSION_4 0x190112
#define HOST_PORT_NUM 0
+#define CPSW_ALE_PORTS_NUM 3
#define SLIVER_SIZE 0x40
#define CPSW1_HOST_PORT_OFFSET 0x028
@@ -352,6 +353,27 @@ struct cpsw_hw_stats {
u32 rxdmaoverruns;
};
+struct cpsw_slave_data {
+ struct device_node *phy_node;
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+ u8 mac_addr[ETH_ALEN];
+ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
+};
+
+struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
+ u32 ss_reg_ofs; /* Subsystem control register offset */
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 ale_entries; /* ale table size */
+	u32	bd_ram_size;  /* buffer descriptor ram size */
+	u32	mac_control;  /* Mac control register */
+	u16	default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */
+ bool dual_emac; /* Enable Dual EMAC mode */
+};
+
struct cpsw_slave {
void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
@@ -365,12 +387,12 @@ struct cpsw_slave {
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
- return __raw_readl(slave->regs + offset);
+ return readl_relaxed(slave->regs + offset);
}
static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
- __raw_writel(val, slave->regs + offset);
+ writel_relaxed(val, slave->regs + offset);
}
struct cpsw_vector {
@@ -660,8 +682,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
- __raw_writel(0xFF, &cpsw->wr_regs->tx_en);
- __raw_writel(0xFF, &cpsw->wr_regs->rx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(cpsw->dma, true);
return;
@@ -669,8 +691,8 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw)
static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
- __raw_writel(0, &cpsw->wr_regs->tx_en);
- __raw_writel(0, &cpsw->wr_regs->rx_en);
+ writel_relaxed(0, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0, &cpsw->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(cpsw->dma, false);
return;
@@ -949,18 +971,14 @@ static inline void soft_reset(const char *module, void __iomem *reg)
{
unsigned long timeout = jiffies + HZ;
- __raw_writel(1, reg);
+ writel_relaxed(1, reg);
do {
cpu_relax();
- } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
+ } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
- WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
+ WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
-#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
- ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
-
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
struct cpsw_priv *priv)
{
@@ -1015,7 +1033,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
if (mac_control != slave->mac_control) {
phy_print_status(phy);
- __raw_writel(mac_control, &slave->sliver->mac_control);
+ writel_relaxed(mac_control, &slave->sliver->mac_control);
}
slave->mac_control = mac_control;
@@ -1278,7 +1296,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
soft_reset_slave(slave);
/* setup priority mapping */
- __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+ writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -1304,7 +1322,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
/* setup max packet size, and mac address */
- __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+ writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
@@ -1395,9 +1413,9 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
- __raw_writel(CPDMA_TX_PRIORITY_MAP,
- &cpsw->host_port_regs->cpdma_tx_pri_map);
- __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -1514,10 +1532,10 @@ static int cpsw_ndo_open(struct net_device *ndev)
/* initialize shared resources for every ndev */
if (!cpsw->usage_count) {
/* disable priority elevation */
- __raw_writel(0, &cpsw->regs->ptype);
+ writel_relaxed(0, &cpsw->regs->ptype);
/* enable statistics collection only on all ports */
- __raw_writel(0x7, &cpsw->regs->stat_port_en);
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
/* Enable internal fifo flow control */
writel(0x7, &cpsw->regs->flow_control);
@@ -1701,7 +1719,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
- __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2298,7 +2316,6 @@ static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
- int (*poll)(struct napi_struct *, int);
struct cpsw_common *cpsw = priv->cpsw;
void (*handler)(void *, int, int);
struct netdev_queue *queue;
@@ -2309,12 +2326,10 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
ch = &cpsw->rx_ch_num;
vec = cpsw->rxv;
handler = cpsw_rx_handler;
- poll = cpsw_rx_poll;
} else {
ch = &cpsw->tx_ch_num;
vec = cpsw->txv;
handler = cpsw_tx_handler;
- poll = cpsw_tx_poll;
}
while (*ch < ch_num) {
@@ -3050,17 +3065,23 @@ static int cpsw_probe(struct platform_device *pdev)
}
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(priv->dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_dma_ret;
+ }
+
cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
- if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) {
- dev_err(priv->dev, "error initializing dma channels\n");
- ret = -ENOMEM;
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(priv->dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
goto clean_dma_ret;
}
ale_params.dev = &pdev->dev;
ale_params.ale_ageout = ale_ageout;
ale_params.ale_entries = data->ale_entries;
- ale_params.ale_ports = data->slaves;
+ ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
cpsw->ale = cpsw_ale_create(&ale_params);
if (!cpsw->ale) {
@@ -3072,14 +3093,14 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
ret = ndev->irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
@@ -3103,7 +3124,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret) {
dev_err(priv->dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
if (cpsw->data.dual_emac) {
@@ -3126,7 +3147,7 @@ static int cpsw_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 1);
if (irq < 0) {
ret = irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw->irqs_table[0] = irq;
@@ -3134,14 +3155,14 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
/* TX IRQ */
irq = platform_get_irq(pdev, 2);
if (irq < 0) {
ret = irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw->irqs_table[1] = irq;
@@ -3149,7 +3170,7 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw_notice(priv, probe,
@@ -3162,8 +3183,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_unregister_netdev_ret:
unregister_netdev(ndev);
-clean_ale_ret:
- cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
@@ -3193,7 +3212,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(ndev);
cpts_release(cpsw->cpts);
- cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
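Most of the cpsw churn is the __raw_writel()/__raw_readl() -> writel_relaxed()/readl_relaxed() conversion. Neither variant carries the full barriers of writel()/readl(), but the relaxed accessors still perform the little-endian fixup that the raw ones skip, so they stay correct on big-endian builds. On architectures taking the asm-generic defaults, the relationship is roughly this (simplified from include/asm-generic/io.h):

#define writel_relaxed(v, c)	__raw_writel((__force u32)cpu_to_le32(v), c)
#define readl_relaxed(c)	le32_to_cpu((__force __le32)__raw_readl(c))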
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 6c3037aa2cd3..cf111db3dc27 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -17,26 +17,9 @@
#include <linux/if_ether.h>
#include <linux/phy.h>
-struct cpsw_slave_data {
- struct device_node *phy_node;
- char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
- u8 mac_addr[ETH_ALEN];
- u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
-};
-
-struct cpsw_platform_data {
- struct cpsw_slave_data *slave_data;
- u32 ss_reg_ofs; /* Subsystem control register offset */
- u32 channels; /* number of cpdma channels (symmetric) */
- u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
- u32 mac_control; /* Mac control register */
- u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
- bool dual_emac; /* Enable Dual EMAC mode */
-};
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index b432a75fb874..93dc05c194d3 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -150,11 +150,11 @@ static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
WARN_ON(idx > ale->params.ale_entries);
- __raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+ writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
for (i = 0; i < ALE_ENTRY_WORDS; i++)
- ale_entry[i] = __raw_readl(ale->params.ale_regs +
- ALE_TABLE + 4 * i);
+ ale_entry[i] = readl_relaxed(ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
return idx;
}
@@ -166,11 +166,11 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
WARN_ON(idx > ale->params.ale_entries);
for (i = 0; i < ALE_ENTRY_WORDS; i++)
- __raw_writel(ale_entry[i], ale->params.ale_regs +
- ALE_TABLE + 4 * i);
+ writel_relaxed(ale_entry[i], ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
- __raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
- ALE_TABLE_CONTROL);
+ writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+ ALE_TABLE_CONTROL);
return idx;
}
@@ -723,7 +723,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
if (info->port_offset == 0 && info->port_shift == 0)
port = 0; /* global, port is a dont care */
- if (port < 0 || port > ale->params.ale_ports)
+ if (port < 0 || port >= ale->params.ale_ports)
return -EINVAL;
mask = BITMASK(info->bits);
@@ -733,9 +733,9 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
offset = info->offset + (port * info->port_offset);
shift = info->shift + (port * info->port_shift);
- tmp = __raw_readl(ale->params.ale_regs + offset);
+ tmp = readl_relaxed(ale->params.ale_regs + offset);
tmp = (tmp & ~(mask << shift)) | (value << shift);
- __raw_writel(tmp, ale->params.ale_regs + offset);
+ writel_relaxed(tmp, ale->params.ale_regs + offset);
return 0;
}
@@ -754,13 +754,13 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
if (info->port_offset == 0 && info->port_shift == 0)
port = 0; /* global, port is a dont care */
- if (port < 0 || port > ale->params.ale_ports)
+ if (port < 0 || port >= ale->params.ale_ports)
return -EINVAL;
offset = info->offset + (port * info->port_offset);
shift = info->shift + (port * info->port_shift);
- tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
+ tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
return tmp & BITMASK(info->bits);
}
EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
@@ -779,9 +779,37 @@ static void cpsw_ale_timer(struct timer_list *t)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
+ if (ale->ageout) {
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+ }
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+ del_timer_sync(&ale->timer);
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+ struct cpsw_ale *ale;
u32 rev, ale_entries;
- rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
+ ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
+ if (!ale)
+ return NULL;
+
+ ale->params = *params;
+ ale->ageout = ale->params.ale_ageout * HZ;
+
+ rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
if (!ale->params.major_ver_mask)
ale->params.major_ver_mask = 0xff;
ale->version =
@@ -793,8 +821,8 @@ void cpsw_ale_start(struct cpsw_ale *ale)
if (!ale->params.ale_entries) {
ale_entries =
- __raw_readl(ale->params.ale_regs + ALE_STATUS) &
- ALE_STATUS_SIZE_MASK;
+ readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
+ ALE_STATUS_SIZE_MASK;
/* ALE available on newer NetCP switches has introduced
* a register, ALE_STATUS, to indicate the size of ALE
* table which shows the size as a multiple of 1024 entries.
@@ -816,9 +844,9 @@ void cpsw_ale_start(struct cpsw_ale *ale)
"ALE Table size %ld\n", ale->params.ale_entries);
/* set default bits for existing h/w */
- ale->port_mask_bits = 3;
- ale->port_num_bits = 2;
- ale->vlan_field_bits = 3;
+ ale->port_mask_bits = ale->params.ale_ports;
+ ale->port_num_bits = order_base_2(ale->params.ale_ports);
+ ale->vlan_field_bits = ale->params.ale_ports;
/* Set defaults override for ALE on NetCP NU switch and for version
* 1R3
@@ -847,57 +875,12 @@ void cpsw_ale_start(struct cpsw_ale *ale)
ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
- ale->port_mask_bits = ale->params.ale_ports;
- ale->port_num_bits = ale->params.ale_ports - 1;
- ale->vlan_field_bits = ale->params.ale_ports;
- } else if (ale->version == ALE_VERSION_1R3) {
- ale->port_mask_bits = ale->params.ale_ports;
- ale->port_num_bits = 3;
- ale->vlan_field_bits = ale->params.ale_ports;
}
- cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
- cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
-
- timer_setup(&ale->timer, cpsw_ale_timer, 0);
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_start);
-
-void cpsw_ale_stop(struct cpsw_ale *ale)
-{
- del_timer_sync(&ale->timer);
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_stop);
-
-struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
-{
- struct cpsw_ale *ale;
-
- ale = kzalloc(sizeof(*ale), GFP_KERNEL);
- if (!ale)
- return NULL;
-
- ale->params = *params;
- ale->ageout = ale->params.ale_ageout * HZ;
-
return ale;
}
EXPORT_SYMBOL_GPL(cpsw_ale_create);
-int cpsw_ale_destroy(struct cpsw_ale *ale)
-{
- if (!ale)
- return -EINVAL;
- cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
- kfree(ale);
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
-
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
{
int i;
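cpsw_ale_create() now allocates the ale object with devm_kzalloc() tied to params->dev, which is what allows cpsw_ale_destroy() to be deleted from every caller: the memory is released automatically when the device unbinds. The shape of the idiom, as a sketch:

	struct cpsw_ale *ale = devm_kzalloc(params->dev, sizeof(*ale),
					    GFP_KERNEL);
	if (!ale)
		return NULL;
	/* ... no matching kfree(): freed by devres on driver detach ... */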
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 25d24e8d0904..d4fe9016429b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -100,7 +100,6 @@ enum cpsw_ale_port_state {
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
-int cpsw_ale_destroy(struct cpsw_ale *ale);
void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index e4d6edf387b3..6f9173ff9414 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -893,7 +893,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
if (__chan_linear(chan_num) >= ctlr->num_chan)
- return NULL;
+ return ERR_PTR(-EINVAL);
chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
if (!chan)
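Returning ERR_PTR(-EINVAL) instead of NULL is what lets the cpsw and davinci_emac probe paths above report the precise errno via PTR_ERR() rather than a blanket -ENOMEM. The encoding reserves the top MAX_ERRNO (4095) pointer values for error codes; a runnable userspace model of the macros:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *chan = ERR_PTR(-22);			/* -EINVAL  */

	if (IS_ERR(chan))
		printf("err = %ld\n", PTR_ERR(chan));	/* err = -22 */
	return 0;
}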
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4bb561856af5..abceea802ea1 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1385,11 +1385,6 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
-static int match_first_device(struct device *dev, void *data)
-{
- return !strncmp(dev_name(dev), "davinci_mdio", 12);
-}
-
/**
* emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
@@ -1489,8 +1484,8 @@ static int emac_dev_open(struct net_device *ndev)
/* use the first phy on the bus if pdata did not give us a phy id */
if (!phydev && !priv->phy_id) {
- phy = bus_find_device(&mdio_bus_type, NULL, NULL,
- match_first_device);
+ phy = bus_find_device_by_name(&mdio_bus_type, NULL,
+ "davinci_mdio");
if (phy) {
priv->phy_id = dev_name(phy);
if (!priv->phy_id || !*priv->phy_id)
@@ -1875,10 +1870,17 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
emac_tx_handler, 0);
+ if (IS_ERR(priv->txchan)) {
+ dev_err(&pdev->dev, "error initializing tx dma channel\n");
+ rc = PTR_ERR(priv->txchan);
+ goto no_cpdma_chan;
+ }
+
priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
emac_rx_handler, 1);
- if (WARN_ON(!priv->txchan || !priv->rxchan)) {
- rc = -ENOMEM;
+ if (IS_ERR(priv->rxchan)) {
+ dev_err(&pdev->dev, "error initializing rx dma channel\n");
+ rc = PTR_ERR(priv->rxchan);
goto no_cpdma_chan;
}
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index e831c49713ee..56dbc0b9fedc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -27,6 +27,7 @@
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
+#include "cpsw.h"
#include "cpsw_ale.h"
#include "netcp.h"
#include "cpts.h"
@@ -2047,10 +2048,6 @@ static const struct ethtool_ops keystone_ethtool_ops = {
.get_ts_info = keystone_get_ts_info,
};
-#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
- ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
-
static void gbe_set_slave_mac(struct gbe_slave *slave,
struct gbe_intf *gbe_intf)
{
@@ -3692,7 +3689,6 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
del_timer_sync(&gbe_dev->timer);
cpts_release(gbe_dev->cpts);
cpsw_ale_stop(gbe_dev->ale);
- cpsw_ale_destroy(gbe_dev->ale);
netcp_txpipe_close(&gbe_dev->tx_pipe);
free_secondary_ports(gbe_dev);