Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 14
-rw-r--r--  drivers/net/bonding/bond_options.c | 23
-rw-r--r--  drivers/net/can/cc770/cc770.c | 2
-rw-r--r--  drivers/net/can/dev.c | 14
-rw-r--r--  drivers/net/can/flexcan.c | 33
-rw-r--r--  drivers/net/can/janz-ican3.c | 2
-rw-r--r--  drivers/net/can/m_can/m_can.c | 18
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 5
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 2
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 39
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 2
-rw-r--r--  drivers/net/can/sja1000/peak_pcmcia.c | 2
-rw-r--r--  drivers/net/can/sun4i_can.c | 2
-rw-r--r--  drivers/net/can/usb/Kconfig | 48
-rw-r--r--  drivers/net/can/usb/Makefile | 7
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 1
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 2085
-rw-r--r--  drivers/net/can/usb/kvaser_usb/Makefile | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 188
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 835
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 2028
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 1358
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 1
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 1
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 2
-rw-r--r--  drivers/net/can/usb/ucan.c | 1613
-rw-r--r--  drivers/net/can/xilinx_can.c | 784
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 12
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 3
-rw-r--r--  drivers/net/dsa/bcm_sf2_regs.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 25
-rw-r--r--  drivers/net/ethernet/3com/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/Makefile | 2
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 1
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 7
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 216
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h | 66
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 89
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 378
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 37
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 1227
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 44
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 296
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 9
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 80
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 80
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 21
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 18
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 22
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 80
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 12
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 300
-rw-r--r--  drivers/net/ethernet/jme.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 409
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.h | 8
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 64
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/profile.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 305
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 190
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 254
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 230
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/vxlan.h) | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 190
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 557
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 509
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 1199
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 117
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 269
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 56
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/Kconfig | 23
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 25
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 28
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 40
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 60
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 1
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 11
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 106
-rw-r--r--  drivers/net/net_failover.c | 4
-rw-r--r--  drivers/net/netdevsim/devlink.c | 1
-rw-r--r--  drivers/net/netdevsim/netdev.c | 6
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 3
-rw-r--r--  drivers/net/phy/marvell.c | 2
-rw-r--r--  drivers/net/phy/mdio-mux-bcm-iproc.c | 110
-rw-r--r--  drivers/net/phy/mscc.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 18
-rw-r--r--  drivers/net/ppp/ppp_mppe.c | 56
-rw-r--r--  drivers/net/tun.c | 3
-rw-r--r--  drivers/net/usb/lan78xx.c | 4
-rw-r--r--  drivers/net/usb/pegasus.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/sr9700.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 199
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2
-rw-r--r--  drivers/net/wireless/atmel/atmel.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 38
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 3
-rw-r--r--  drivers/net/wireless/cisco/airo.c | 8
-rw-r--r--  drivers/net/wireless/cisco/airo_cs.c | 3
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/ipw2100.c | 7
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/libipw_wx.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945-mac.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-mac.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/Makefile | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/2000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 163
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/5000.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/6000.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/7000.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/8000.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 70
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/alive.h | 18
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/commands.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/mac.h | 172
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 36
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 250
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 25
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/common_rx.c | 88
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 284
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 40
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/img.h | 37
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/smem.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h | 286
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-context-info.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 74
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 110
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 50
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 205
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 23
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 44
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 39
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 364
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 48
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 207
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 62
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 49
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 294
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 388
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 235
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 192
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 92
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap_ap.c | 8
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap_hw.c | 17
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap_proc.c | 10
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n.c | 5
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 95
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cmdevt.c | 34
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/ie.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/init.c | 5
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.c | 33
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.h | 17
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/pcie.c | 12
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/scan.c | 3
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c | 12
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_event.c | 3
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 8
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_tx.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 3
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/usb.c | 25
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/util.c | 6
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/wmm.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/Kconfig | 26
-rw-r--r--  drivers/net/wireless/mediatek/mt76/Makefile | 20
-rw-r--r--  drivers/net/wireless/mediatek/mt76/agg-rx.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 77
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.h | 43
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 20
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 162
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/Makefile | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/core.c | 34
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c | 166
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/dma.c | 522
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/dma.h | 126
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 445
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h | 149
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/init.c | 720
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h | 282
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h | 772
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mac.c | 660
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mac.h | 154
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 403
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c | 656
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h | 101
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h | 330
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | 1008
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/phy.h | 81
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/regs.h | 651
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/trace.c | 21
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/trace.h | 313
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/tx.c | 270
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 381
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.h | 61
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/util.c | 42
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2.h | 91
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_common.c | 350
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_dma.c | 21
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_dma.h | 38
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c | 13
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_init.c | 305
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c | 259
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mac.c | 656
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mac.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c | 699
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_main.c | 323
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c | 17
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h | 17
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_phy.c | 347
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c | 349
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_regs.h | 30
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_tx.c | 128
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c | 149
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2_usb.c | 142
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2u.h | 83
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2u_core.c | 108
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2u_init.c | 318
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c | 240
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2u_main.c | 185
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c | 463
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c | 303
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 85
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 845
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb_mcu.c | 242
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb_trace.c | 23
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb_trace.h | 71
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/init.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/main.c | 11
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 100
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.c | 154
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.h | 3
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.c | 1
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.h | 3
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink.h | 85
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 18
-rw-r--r--  drivers/net/wireless/ray_cs.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c | 4
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_hal.c | 28
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 3
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_main.c | 7
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 23
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio.c | 5
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_usb.c | 6
-rw-r--r--  drivers/net/wireless/rsi/rsi_mgmt.h | 2
-rw-r--r--  drivers/net/wireless/rsi/rsi_sdio.h | 3
-rw-r--r--  drivers/net/wireless/rsi/rsi_usb.h | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 10
-rw-r--r--  drivers/net/wireless/ti/wlcore/rx.c | 8
-rw-r--r--  drivers/net/xen-netback/netback.c | 4
-rw-r--r--  drivers/net/xen-netfront.c | 6
368 files changed, 31720 insertions(+), 7470 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9a2ea3c1f949..a764a83f99da 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_upper_unlink;
}
+ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+
/* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
@@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
- bond->nest_level = dev_get_nest_level(bond_dev);
netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,
@@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
}
}
+static int bond_get_nest_level(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+
+ return bond->nest_level;
+}
+
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;
- spin_lock(&bond->stats_lock);
+ spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
memcpy(stats, &bond->bond_stats, sizeof(*stats));
rcu_read_lock();
@@ -4228,6 +4236,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_neigh_setup = bond_neigh_setup,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+ .ndo_get_lock_subclass = bond_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
@@ -4726,6 +4735,7 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
+ bond->nest_level = SINGLE_DEPTH_NESTING;
netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
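Note: the three bond_main.c hunks above are one logical change. bond_enslave() now records the nesting depth at enslave time (parent level + 1), bond_init() seeds it with SINGLE_DEPTH_NESTING, and the new .ndo_get_lock_subclass hook reports it so bond_get_stats() can take stats_lock with a per-level lockdep subclass. Without this, stacking one bond on top of another makes lockdep see the same lock class acquired twice and report a false self-deadlock. A minimal illustrative sketch of the idea, using a hypothetical fake_bond type rather than the kernel's struct bonding:

    #include <linux/spinlock.h>

    /* Hypothetical stand-in for struct bonding, to show the locking shape. */
    struct fake_bond {
    	spinlock_t stats_lock;
    	int nest_level;			/* 1 for the outermost bond, 2 below it, ... */
    	struct fake_bond *lower;	/* next bond down the stack, or NULL */
    };

    /* All stats_locks share one lockdep class; spin_lock_nested() tells
     * lockdep that each nesting level is a distinct acquisition, so the
     * recursive walk below is not flagged as recursive locking.
     */
    static void fold_stats_down(struct fake_bond *bond)
    {
    	spin_lock_nested(&bond->stats_lock, bond->nest_level);
    	if (bond->lower)
    		fold_stats_down(bond->lower);
    	spin_unlock(&bond->stats_lock);
    }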
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 98663c50ded0..4d5d01cb8141 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
- netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
- newval->string);
- /* disable arp monitoring */
- bond->params.arp_interval = 0;
- /* set miimon to default value */
- bond->params.miimon = BOND_DEFAULT_MIIMON;
- netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
- bond->params.miimon);
+ if (!bond_mode_uses_arp(newval->value)) {
+ if (bond->params.arp_interval) {
+ netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+ newval->string);
+ /* disable arp monitoring */
+ bond->params.arp_interval = 0;
+ }
+
+ if (!bond->params.miimon) {
+ /* set miimon to default value */
+ bond->params.miimon = BOND_DEFAULT_MIIMON;
+ netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+ bond->params.miimon);
+ }
}
if (newval->value == BOND_MODE_ALB)
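Note: the rewrite above decouples two side effects that the old single if-block tied together. Switching to a mode that cannot use ARP monitoring now always clears arp_interval, but the default MII interval is installed only when none is configured yet, so a miimon value the user set in advance survives the mode change. A condensed sketch of the new flow (plain C, not the kernel source; BOND_DEFAULT_MIIMON being 100 is quoted here as an assumption):

    #define BOND_DEFAULT_MIIMON 100			/* assumed kernel default */

    static void enter_non_arp_mode(int *arp_interval, int *miimon)
    {
    	*arp_interval = 0;				/* ARP monitor never applies here */
    	if (!*miimon)					/* keep a user-configured interval */
    		*miimon = BOND_DEFAULT_MIIMON;
    }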
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d4dd4da23997..da636a22c542 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
static int i82527_compat;
module_param(i82527_compat, int, 0444);
-MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode "
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
"without using additional functions");
/*
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3c71f1cb205f..49163570a63a 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -649,8 +649,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cf = skb_put(skb, sizeof(struct can_frame));
- memset(*cf, 0, sizeof(struct can_frame));
+ *cf = skb_put_zero(skb, sizeof(struct can_frame));
return skb;
}
@@ -678,8 +677,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
- *cfd = skb_put(skb, sizeof(struct canfd_frame));
- memset(*cfd, 0, sizeof(struct canfd_frame));
+ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
return skb;
}
@@ -703,7 +701,8 @@ EXPORT_SYMBOL_GPL(alloc_can_err_skb);
/*
* Allocate and setup space for the CAN network device
*/
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
struct can_priv *priv;
@@ -715,7 +714,8 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
else
size = sizeof_priv;
- dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup);
+ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+ txqs, rxqs);
if (!dev)
return NULL;
@@ -734,7 +734,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
return dev;
}
-EXPORT_SYMBOL_GPL(alloc_candev);
+EXPORT_SYMBOL_GPL(alloc_candev_mqs);
/*
* Free space of the CAN network device
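Note: renaming the allocator to alloc_candev_mqs() and routing it through alloc_netdev_mqs() lets CAN drivers request multiple TX/RX queues (used by the new kvaser_usb and xilinx_can code elsewhere in this diff); the skb_put_zero() conversions above are behavior-neutral, combining skb_put() with a memset() of the newly reserved area. Existing single-queue callers keep the old name through a header wrapper; the header change is not part of this diff, so take the exact form below as an assumption based on the usual kernel pattern:

    /* sketch: single-queue compatibility wrapper in <linux/can/dev.h> */
    #define alloc_candev(sizeof_priv, echo_skb_max) \
    	alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)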
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index d53a45bf2a72..8e972ef08637 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1,24 +1,13 @@
-/*
- * flexcan.c - FLEXCAN CAN controller driver
- *
- * Copyright (c) 2005-2006 Varma Electronics Oy
- * Copyright (c) 2009 Sascha Hauer, Pengutronix
- * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- * Copyright (c) 2014 David Jander, Protonic Holland
- *
- * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
- *
- * LICENCE:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// flexcan.c - FLEXCAN CAN controller driver
+//
+// Copyright (c) 2005-2006 Varma Electronics Oy
+// Copyright (c) 2009 Sascha Hauer, Pengutronix
+// Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2014 David Jander, Protonic Holland
+//
+// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
#include <linux/netdevice.h>
#include <linux/can.h>
@@ -523,7 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
return err;
}
-static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index adfdb66a486e..02042cb09bd2 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev)
return 0;
}
-static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ican3_dev *mod = netdev_priv(ndev);
struct can_frame *cf = (struct can_frame *)skb->data;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b397a33f3d32..9b449400376b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
int err;
err = pm_runtime_get_sync(priv->device);
- if (err)
+ if (err < 0) {
pm_runtime_put_noidle(priv->device);
+ return err;
+ }
- return err;
+ return 0;
}
static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
- cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+ CCCR_NISO);
/* Only 3.2.x has NISO Bit implemented */
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(cclk);
priv->mram_base = mram_addr;
- m_can_of_parse_mram(priv, mram_config_vals);
-
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto clk_disable;
}
+ m_can_of_parse_mram(priv, mram_config_vals);
+
devm_can_led_init(dev);
of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
return ret;
}
-/* TODO: runtime PM with power down or sleep mode */
-
static __maybe_unused int m_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- m_can_init_ram(priv);
-
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
if (ret)
return ret;
+ m_can_init_ram(priv);
m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
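Note: two independent fixes share this file. First, pm_runtime_get_sync() may return 1 when the device is already active, so only a negative return is an error, and even on failure the runtime-PM usage counter was incremented and must be dropped. Second, m_can_of_parse_mram() and m_can_init_ram() move after clock start, since the message RAM is only accessible with clocks running. The runtime-PM pattern as a standalone sketch:

    #include <linux/pm_runtime.h>

    static int clk_start_sketch(struct device *dev)
    {
    	int err = pm_runtime_get_sync(dev);

    	/* >0 (already active) is success; only a negative errno fails */
    	if (err < 0) {
    		/* usage count was bumped even on failure: rebalance it */
    		pm_runtime_put_noidle(dev);
    		return err;
    	}
    	return 0;
    }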
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
return 0;
}
cdm = of_iomap(np_cdm, 0);
+ if (!cdm) {
+ of_node_put(np_cdm);
+ dev_err(&ofdev->dev, "can't map clock node!\n");
+ return 0;
+ }
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
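Note: of_iomap() can return NULL, and at that point the driver still holds the reference taken on np_cdm earlier, so the error path must drop it before bailing out. The general shape, sketched:

    #include <linux/of.h>
    #include <linux/of_address.h>

    static void __iomem *map_clock_node(struct device_node *np)
    {
    	void __iomem *base = of_iomap(np, 0);	/* may fail */

    	if (!base) {
    		of_node_put(np);	/* balance the earlier of_find_*() get */
    		return NULL;
    	}
    	return base;			/* caller puts np when done with it */
    }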
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index ed8561d4a90f..5696d7e80751 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -486,7 +486,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
if (msg_size <= 0)
break;
- msg_ptr += msg_size;
+ msg_ptr += ALIGN(msg_size, 4);
}
if (msg_size < 0)
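Note: the firmware pads each message in the receive area to a 32-bit boundary, so advancing by the raw msg_size desynchronizes the parser after any message whose size is not a multiple of 4. ALIGN(x, a) rounds x up to the next multiple of a; a worked sketch:

    #include <linux/kernel.h>	/* ALIGN() */

    /* ALIGN(5, 4) == 8, ALIGN(8, 4) == 8, ALIGN(9, 4) == 12 */
    static void *next_msg(void *msg_ptr, int msg_size)
    {
    	return msg_ptr + ALIGN(msg_size, 4);	/* padded, not raw, size */
    }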
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b9e28578bc7b..c458d5fdc8d3 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
+#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
+ ((u32)(y) << 16) | \
+ ((u32)(z) << 8))
+
/* System Control Registers Bits */
#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -170,9 +174,6 @@ struct pciefd_page {
u32 size;
};
-#define CANFD_IRQ_SET 0x00000001
-#define CANFD_TX_PATH_SET 0x00000002
-
/* CAN-FD channel object */
struct pciefd_board;
struct pciefd_can {
@@ -414,7 +415,7 @@ static int pciefd_pre_cmd(struct peak_canfd_priv *ucan)
break;
/* going into operational mode: setup IRQ handler */
- err = request_irq(priv->board->pci_dev->irq,
+ err = request_irq(priv->ucan.ndev->irq,
pciefd_irq_handler,
IRQF_SHARED,
PCIEFD_DRV_NAME,
@@ -487,15 +488,18 @@ static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
/* controller now in reset mode: */
+ /* disable IRQ for this CAN */
+ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+ PCIEFD_REG_CAN_RX_CTL_CLR);
+
/* stop and reset DMA addresses in Tx/Rx engines */
pciefd_can_clear_tx_dma(priv);
pciefd_can_clear_rx_dma(priv);
- /* disable IRQ for this CAN */
- pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
- PCIEFD_REG_CAN_RX_CTL_CLR);
+ /* wait for above commands to complete (read cycle) */
+ (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
- free_irq(priv->board->pci_dev->irq, priv);
+ free_irq(priv->ucan.ndev->irq, priv);
ucan->can.state = CAN_STATE_STOPPED;
@@ -634,7 +638,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
GFP_KERNEL);
if (!priv->tx_dma_vaddr) {
dev_err(&pciefd->pci_dev->dev,
- "Tx dmaim_alloc_coherent(%u) failure\n",
+ "Tx dmam_alloc_coherent(%u) failure\n",
PCIEFD_TX_DMA_SIZE);
goto err_free_candev;
}
@@ -687,7 +691,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
pciefd->can[pciefd->can_count] = priv;
dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
- ndev->name, priv->reg_base, pciefd->pci_dev->irq);
+ ndev->name, priv->reg_base, ndev->irq);
return 0;
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
"%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
hw_ver_major, hw_ver_minor, hw_ver_sub);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
+ * 64-bit logical addresses: this workaround forces usage of 32-bit
+ * DMA addresses only when such a fw is detected.
+ */
+ if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+ PCIEFD_FW_VERSION(3, 3, 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ dev_warn(&pdev->dev,
+ "warning: can't set DMA mask %llxh (err %d)\n",
+ DMA_BIT_MASK(32), err);
+ }
+#endif
+
/* stop system clock */
pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
PCIEFD_REG_SYS_CTL_CLR);
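Note: packing the dotted firmware version into a u32 (low byte left free) turns "older than v3.3.0" into a single integer compare, which is what gates the 32-bit DMA-mask workaround above. A worked example using the macro from this patch:

    #include <linux/types.h>

    #define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
    				    ((u32)(y) << 16) | \
    				    ((u32)(z) << 8))

    /* PCIEFD_FW_VERSION(3, 3, 0)  == 0x03030000
     * PCIEFD_FW_VERSION(2, 10, 4) == 0x020a0400 < 0x03030000,
     * so a v2.10.4 board is restricted to DMA_BIT_MASK(32) addressing.
     */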
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5adc95c922ee..a97b81d1d0da 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -608,7 +608,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
writeb(0x00, cfg_base + PITA_GPIOICR);
/* Toggle reset */
writeb(0x05, cfg_base + PITA_MISC + 3);
- mdelay(5);
+ usleep_range(5000, 6000);
/* Leave parport mux mode */
writeb(0x04, cfg_base + PITA_MISC + 3);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 485b19c9ae47..b8c39ede7cd5 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -530,7 +530,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
pcan_write_reg(card, PCC_CCR, ccr);
/* wait 2ms before unresetting channels */
- mdelay(2);
+ usleep_range(2000, 3000);
ccr &= ~PCC_CCR_RST_ALL;
pcan_write_reg(card, PCC_CCR, ccr);
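Note: both PEAK reset paths (peak_pci above and peak_pcmcia here) run in process context, where mdelay() spins the CPU for the whole delay. usleep_range() actually sleeps, and the min/max window lets the timer core coalesce the wakeup with others. Sketch:

    #include <linux/delay.h>

    static void settle_after_reset(void)
    {
    	/* was mdelay(2): a 2 ms busy-wait although sleeping is allowed */
    	usleep_range(2000, 3000);	/* sleep 2-3 ms, CPU freed meanwhile */
    }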
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 1ac2090a1721..093fc9a529f0 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode)
* xx xx xx xx ff ll 00 11 22 33 44 55 66 77
* [ can_id ] [flags] [len] [can data (up to 8 bytes)]
*/
-static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sun4ican_priv *priv = netdev_priv(dev);
struct can_frame *cf = (struct can_frame *)skb->data;
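Note: flexcan, janz-ican3 and sun4i_can (and further drivers below) convert their transmit handlers from int to netdev_tx_t, the type .ndo_start_xmit is declared with. Only NETDEV_TX_OK and NETDEV_TX_BUSY are meaningful return values; a stray negative errno would be misinterpreted by the core as "busy, retry". A minimal conforming handler, sketched:

    #include <linux/can/dev.h>
    #include <linux/netdevice.h>

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	if (can_dropped_invalid_skb(dev, skb))
    		return NETDEV_TX_OK;	/* skb consumed, nothing to retry */

    	/* ... hand the frame to hardware, stop the queue if full ... */
    	return NETDEV_TX_OK;		/* never a negative errno */
    }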
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index c36f4bdcbf4f..750d04d9e2ae 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -1,6 +1,12 @@
menu "CAN USB interfaces"
depends on USB
+config CAN_8DEV_USB
+ tristate "8 devices USB2CAN interface"
+ ---help---
+ This driver supports the USB2CAN interface
+ from 8 devices (http://www.8devices.com).
+
config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
---help---
@@ -26,7 +32,7 @@ config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
---help---
This driver adds support for Kvaser CAN/USB devices like Kvaser
- Leaf Light and Kvaser USBcan II.
+ Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
The driver provides support for the following devices:
- Kvaser Leaf Light
@@ -55,12 +61,30 @@ config CAN_KVASER_USB
- Kvaser Memorator HS/HS
- Kvaser Memorator HS/LS
- Scania VCI2 (if you have the Kvaser logo on top)
+ - Kvaser BlackBird v2
+ - Kvaser Leaf Pro HS v2
+ - Kvaser Hybrid 2xCAN/LIN
+ - Kvaser Hybrid Pro 2xCAN/LIN
+ - Kvaser Memorator 2xHS v2
+ - Kvaser Memorator Pro 2xHS v2
+ - Kvaser Memorator Pro 5xHS
+ - Kvaser USBcan Light 4xHS
+ - Kvaser USBcan Pro 2xHS v2
+ - Kvaser USBcan Pro 5xHS
+ - ATI Memorator Pro 2xHS v2
+ - ATI USBcan Pro 2xHS v2
If unsure, say N.
To compile this driver as a module, choose M here: the
module will be called kvaser_usb.
+config CAN_MCBA_USB
+ tristate "Microchip CAN BUS Analyzer interface"
+ ---help---
+ This driver supports the CAN BUS Analyzer interface
+ from Microchip (http://www.microchip.com/development-tools/).
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
---help---
@@ -77,16 +101,26 @@ config CAN_PEAK_USB
(see also http://www.peak-system.com).
-config CAN_8DEV_USB
- tristate "8 devices USB2CAN interface"
- ---help---
- This driver supports the USB2CAN interface
- from 8 devices (http://www.8devices.com).
-
-config CAN_MCBA_USB
-	tristate "Microchip CAN BUS Analyzer interface"
-	---help---
-	  This driver supports the CAN BUS Analyzer interface
-	  from Microchip (http://www.microchip.com/development-tools/).
+config CAN_UCAN
+ tristate "Theobroma Systems UCAN interface"
+ ---help---
+ This driver supports the Theobroma Systems
+ UCAN USB-CAN interface.
+
+ The UCAN driver supports the microcontroller-based USB/CAN
+ adapters from Theobroma Systems. There are two form-factors
+ that run essentially the same firmware:
+
+ * Seal: standalone USB stick
+	    (https://www.theobroma-systems.com/seal)
+ * Mule: integrated on the PCB of various System-on-Modules
+ from Theobroma Systems like the A31-µQ7 and the RK3399-Q7
+ (https://www.theobroma-systems.com/rk3399-q7)
+
endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 49ac7b99ba32..aa0f17c0b2ed 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,10 +3,11 @@
# Makefile for the Linux Controller Area Network USB drivers.
#
+obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
-obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
-obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
-obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
+obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 12ff0020ecd6..b7dfd4109d24 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
usb_free_urb(dev->intr_urb);
kfree(dev->intr_in_buffer);
+ kfree(dev->tx_msg_buffer);
}
}
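Note: the one-line ems_usb change plugs a leak: disconnect() freed intr_in_buffer but never tx_msg_buffer, so every unplug leaked the transmit scratch buffer allocated at probe time. Teardown should mirror setup; sketched with a hypothetical demo_dev standing in for the driver's private struct:

    #include <linux/slab.h>
    #include <linux/usb.h>

    struct demo_dev {			/* hypothetical, mirrors the relevant fields */
    	struct urb *intr_urb;
    	u8 *intr_in_buffer;
    	u8 *tx_msg_buffer;
    };

    /* every probe-time allocation gets a matching free here */
    static void demo_disconnect(struct demo_dev *dev)
    {
    	usb_free_urb(dev->intr_urb);
    	kfree(dev->intr_in_buffer);
    	kfree(dev->tx_msg_buffer);	/* previously forgotten -> leaked */
    }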
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
deleted file mode 100644
index daed57d3d209..000000000000
--- a/drivers/net/can/usb/kvaser_usb.c
+++ /dev/null
@@ -1,2085 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * Parts of this driver are based on the following:
- * - Kvaser linux leaf driver (version 4.78)
- * - CAN driver for esd CAN-USB/2
- * - Kvaser linux usbcanII driver (version 5.3)
- *
- * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
- * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
- * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
- * Copyright (C) 2015 Valeo S.A.
- */
-
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/usb.h>
-
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-#define MAX_RX_URBS 4
-#define START_TIMEOUT 1000 /* msecs */
-#define STOP_TIMEOUT 1000 /* msecs */
-#define USB_SEND_TIMEOUT 1000 /* msecs */
-#define USB_RECV_TIMEOUT 1000 /* msecs */
-#define RX_BUFFER_SIZE 3072
-#define CAN_USB_CLOCK 8000000
-#define MAX_NET_DEVICES 3
-#define MAX_USBCAN_NET_DEVICES 2
-
-/* Kvaser Leaf USB devices */
-#define KVASER_VENDOR_ID 0x0bfd
-#define USB_LEAF_DEVEL_PRODUCT_ID 10
-#define USB_LEAF_LITE_PRODUCT_ID 11
-#define USB_LEAF_PRO_PRODUCT_ID 12
-#define USB_LEAF_SPRO_PRODUCT_ID 14
-#define USB_LEAF_PRO_LS_PRODUCT_ID 15
-#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
-#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
-#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
-#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
-#define USB_MEMO2_DEVEL_PRODUCT_ID 22
-#define USB_MEMO2_HSHS_PRODUCT_ID 23
-#define USB_UPRO_HSHS_PRODUCT_ID 24
-#define USB_LEAF_LITE_GI_PRODUCT_ID 25
-#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
-#define USB_MEMO2_HSLS_PRODUCT_ID 27
-#define USB_LEAF_LITE_CH_PRODUCT_ID 28
-#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
-#define USB_OEM_MERCURY_PRODUCT_ID 34
-#define USB_OEM_LEAF_PRODUCT_ID 35
-#define USB_CAN_R_PRODUCT_ID 39
-#define USB_LEAF_LITE_V2_PRODUCT_ID 288
-#define USB_MINI_PCIE_HS_PRODUCT_ID 289
-#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
-#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
-#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
-
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
-}
-
-/* Kvaser USBCan-II devices */
-#define USB_USBCAN_REVB_PRODUCT_ID 2
-#define USB_VCI2_PRODUCT_ID 3
-#define USB_USBCAN2_PRODUCT_ID 4
-#define USB_MEMORATOR_PRODUCT_ID 5
-
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
-
-/* USB devices features */
-#define KVASER_HAS_SILENT_MODE BIT(0)
-#define KVASER_HAS_TXRX_ERRORS BIT(1)
-
-/* Message header size */
-#define MSG_HEADER_LEN 2
-
-/* Can message flags */
-#define MSG_FLAG_ERROR_FRAME BIT(0)
-#define MSG_FLAG_OVERRUN BIT(1)
-#define MSG_FLAG_NERR BIT(2)
-#define MSG_FLAG_WAKEUP BIT(3)
-#define MSG_FLAG_REMOTE_FRAME BIT(4)
-#define MSG_FLAG_RESERVED BIT(5)
-#define MSG_FLAG_TX_ACK BIT(6)
-#define MSG_FLAG_TX_REQUEST BIT(7)
-
-/* Can states (M16C CxSTRH register) */
-#define M16C_STATE_BUS_RESET BIT(0)
-#define M16C_STATE_BUS_ERROR BIT(4)
-#define M16C_STATE_BUS_PASSIVE BIT(5)
-#define M16C_STATE_BUS_OFF BIT(6)
-
-/* Can msg ids */
-#define CMD_RX_STD_MESSAGE 12
-#define CMD_TX_STD_MESSAGE 13
-#define CMD_RX_EXT_MESSAGE 14
-#define CMD_TX_EXT_MESSAGE 15
-#define CMD_SET_BUS_PARAMS 16
-#define CMD_GET_BUS_PARAMS 17
-#define CMD_GET_BUS_PARAMS_REPLY 18
-#define CMD_GET_CHIP_STATE 19
-#define CMD_CHIP_STATE_EVENT 20
-#define CMD_SET_CTRL_MODE 21
-#define CMD_GET_CTRL_MODE 22
-#define CMD_GET_CTRL_MODE_REPLY 23
-#define CMD_RESET_CHIP 24
-#define CMD_RESET_CARD 25
-#define CMD_START_CHIP 26
-#define CMD_START_CHIP_REPLY 27
-#define CMD_STOP_CHIP 28
-#define CMD_STOP_CHIP_REPLY 29
-
-#define CMD_LEAF_GET_CARD_INFO2 32
-#define CMD_USBCAN_RESET_CLOCK 32
-#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
-
-#define CMD_GET_CARD_INFO 34
-#define CMD_GET_CARD_INFO_REPLY 35
-#define CMD_GET_SOFTWARE_INFO 38
-#define CMD_GET_SOFTWARE_INFO_REPLY 39
-#define CMD_ERROR_EVENT 45
-#define CMD_FLUSH_QUEUE 48
-#define CMD_RESET_ERROR_COUNTER 49
-#define CMD_TX_ACKNOWLEDGE 50
-#define CMD_CAN_ERROR_EVENT 51
-#define CMD_FLUSH_QUEUE_REPLY 68
-
-#define CMD_LEAF_USB_THROTTLE 77
-#define CMD_LEAF_LOG_MESSAGE 106
-
-/* error factors */
-#define M16C_EF_ACKE BIT(0)
-#define M16C_EF_CRCE BIT(1)
-#define M16C_EF_FORME BIT(2)
-#define M16C_EF_STFE BIT(3)
-#define M16C_EF_BITE0 BIT(4)
-#define M16C_EF_BITE1 BIT(5)
-#define M16C_EF_RCVE BIT(6)
-#define M16C_EF_TRE BIT(7)
-
-/* Only Leaf-based devices can report M16C error factors,
- * thus define our own error status flags for USBCANII
- */
-#define USBCAN_ERROR_STATE_NONE 0
-#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
-#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
-#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
-/* ctrl modes */
-#define KVASER_CTRL_MODE_NORMAL 1
-#define KVASER_CTRL_MODE_SILENT 2
-#define KVASER_CTRL_MODE_SELFRECEPTION 3
-#define KVASER_CTRL_MODE_OFF 4
-
-/* Extended CAN identifier flag */
-#define KVASER_EXTENDED_FRAME BIT(31)
-
-/* Kvaser USB CAN dongles are divided into two major families:
- * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo'
- * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
- */
-enum kvaser_usb_family {
- KVASER_LEAF,
- KVASER_USBCAN,
-};
-
-struct kvaser_msg_simple {
- u8 tid;
- u8 channel;
-} __packed;
-
-struct kvaser_msg_cardinfo {
- u8 tid;
- u8 nchannels;
- union {
- struct {
- __le32 serial_number;
- __le32 padding;
- } __packed leaf0;
- struct {
- __le32 serial_number_low;
- __le32 serial_number_high;
- } __packed usbcan0;
- } __packed;
- __le32 clock_resolution;
- __le32 mfgdate;
- u8 ean[8];
- u8 hw_revision;
- union {
- struct {
- u8 usb_hs_mode;
- } __packed leaf1;
- struct {
- u8 padding;
- } __packed usbcan1;
- } __packed;
- __le16 padding;
-} __packed;
-
-struct kvaser_msg_cardinfo2 {
- u8 tid;
- u8 reserved;
- u8 pcb_id[24];
- __le32 oem_unlock_code;
-} __packed;
-
-struct leaf_msg_softinfo {
- u8 tid;
- u8 padding0;
- __le32 sw_options;
- __le32 fw_version;
- __le16 max_outstanding_tx;
- __le16 padding1[9];
-} __packed;
-
-struct usbcan_msg_softinfo {
- u8 tid;
- u8 fw_name[5];
- __le16 max_outstanding_tx;
- u8 padding[6];
- __le32 fw_version;
- __le16 checksum;
- __le16 sw_options;
-} __packed;
-
-struct kvaser_msg_busparams {
- u8 tid;
- u8 channel;
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 no_samp;
-} __packed;
-
-struct kvaser_msg_tx_can {
- u8 channel;
- u8 tid;
- u8 msg[14];
- union {
- struct {
- u8 padding;
- u8 flags;
- } __packed leaf;
- struct {
- u8 flags;
- u8 padding;
- } __packed usbcan;
- } __packed;
-} __packed;
-
-struct kvaser_msg_rx_can_header {
- u8 channel;
- u8 flag;
-} __packed;
-
-struct leaf_msg_rx_can {
- u8 channel;
- u8 flag;
-
- __le16 time[3];
- u8 msg[14];
-} __packed;
-
-struct usbcan_msg_rx_can {
- u8 channel;
- u8 flag;
-
- u8 msg[14];
- __le16 time;
-} __packed;
-
-struct leaf_msg_chip_state_event {
- u8 tid;
- u8 channel;
-
- __le16 time[3];
- u8 tx_errors_count;
- u8 rx_errors_count;
-
- u8 status;
- u8 padding[3];
-} __packed;
-
-struct usbcan_msg_chip_state_event {
- u8 tid;
- u8 channel;
-
- u8 tx_errors_count;
- u8 rx_errors_count;
- __le16 time;
-
- u8 status;
- u8 padding[3];
-} __packed;
-
-struct kvaser_msg_tx_acknowledge_header {
- u8 channel;
- u8 tid;
-} __packed;
-
-struct leaf_msg_tx_acknowledge {
- u8 channel;
- u8 tid;
-
- __le16 time[3];
- u8 flags;
- u8 time_offset;
-} __packed;
-
-struct usbcan_msg_tx_acknowledge {
- u8 channel;
- u8 tid;
-
- __le16 time;
- __le16 padding;
-} __packed;
-
-struct leaf_msg_error_event {
- u8 tid;
- u8 flags;
- __le16 time[3];
- u8 channel;
- u8 padding;
- u8 tx_errors_count;
- u8 rx_errors_count;
- u8 status;
- u8 error_factor;
-} __packed;
-
-struct usbcan_msg_error_event {
- u8 tid;
- u8 padding;
- u8 tx_errors_count_ch0;
- u8 rx_errors_count_ch0;
- u8 tx_errors_count_ch1;
- u8 rx_errors_count_ch1;
- u8 status_ch0;
- u8 status_ch1;
- __le16 time;
-} __packed;
-
-struct kvaser_msg_ctrl_mode {
- u8 tid;
- u8 channel;
- u8 ctrl_mode;
- u8 padding[3];
-} __packed;
-
-struct kvaser_msg_flush_queue {
- u8 tid;
- u8 channel;
- u8 flags;
- u8 padding[3];
-} __packed;
-
-struct leaf_msg_log_message {
- u8 channel;
- u8 flags;
- __le16 time[3];
- u8 dlc;
- u8 time_offset;
- __le32 id;
- u8 data[8];
-} __packed;
-
-struct kvaser_msg {
- u8 len;
- u8 id;
- union {
- struct kvaser_msg_simple simple;
- struct kvaser_msg_cardinfo cardinfo;
- struct kvaser_msg_cardinfo2 cardinfo2;
- struct kvaser_msg_busparams busparams;
-
- struct kvaser_msg_rx_can_header rx_can_header;
- struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header;
-
- union {
- struct leaf_msg_softinfo softinfo;
- struct leaf_msg_rx_can rx_can;
- struct leaf_msg_chip_state_event chip_state_event;
- struct leaf_msg_tx_acknowledge tx_acknowledge;
- struct leaf_msg_error_event error_event;
- struct leaf_msg_log_message log_message;
- } __packed leaf;
-
- union {
- struct usbcan_msg_softinfo softinfo;
- struct usbcan_msg_rx_can rx_can;
- struct usbcan_msg_chip_state_event chip_state_event;
- struct usbcan_msg_tx_acknowledge tx_acknowledge;
- struct usbcan_msg_error_event error_event;
- } __packed usbcan;
-
- struct kvaser_msg_tx_can tx_can;
- struct kvaser_msg_ctrl_mode ctrl_mode;
- struct kvaser_msg_flush_queue flush_queue;
- } u;
-} __packed;
-
-/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
- * handling. Some discrepancies between the two families exist:
- *
- * - USBCAN firmware does not report M16C "error factors"
- * - USBCAN controllers has difficulties reporting if the raised error
- * event is for ch0 or ch1. They leave such arbitration to the OS
- * driver by letting it compare error counters with previous values
- * and decide the error event's channel. Thus for USBCAN, the channel
- * field is only advisory.
- */
-struct kvaser_usb_error_summary {
- u8 channel, status, txerr, rxerr;
- union {
- struct {
- u8 error_factor;
- } leaf;
- struct {
- u8 other_ch_status;
- u8 error_state;
- } usbcan;
- };
-};
-
-/* Context for an outstanding, not yet ACKed, transmission */
-struct kvaser_usb_tx_urb_context {
- struct kvaser_usb_net_priv *priv;
- u32 echo_index;
- int dlc;
-};
-
-struct kvaser_usb {
- struct usb_device *udev;
- struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
-
- struct usb_endpoint_descriptor *bulk_in, *bulk_out;
- struct usb_anchor rx_submitted;
-
- /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
- * not yet ACKed, transmissions on this device. This value is
- * also used as a sentinel for marking free tx contexts.
- */
- u32 fw_version;
- unsigned int nchannels;
- unsigned int max_tx_urbs;
- enum kvaser_usb_family family;
-
- bool rxinitdone;
- void *rxbuf[MAX_RX_URBS];
- dma_addr_t rxbuf_dma[MAX_RX_URBS];
-};
-
-struct kvaser_usb_net_priv {
- struct can_priv can;
- struct can_berr_counter bec;
-
- struct kvaser_usb *dev;
- struct net_device *netdev;
- int channel;
-
- struct completion start_comp, stop_comp;
- struct usb_anchor tx_submitted;
-
- spinlock_t tx_contexts_lock;
- int active_tx_contexts;
- struct kvaser_usb_tx_urb_context tx_contexts[];
-};
-
-static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf family IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS |
- KVASER_HAS_SILENT_MODE },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
-
- /* USBCANII family IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_HAS_TXRX_ERRORS },
-
- { }
-};
-MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
-
-static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
- struct kvaser_msg *msg)
-{
- int actual_len;
-
- return usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- msg, msg->len, &actual_len,
- USB_SEND_TIMEOUT);
-}
-
-static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
- struct kvaser_msg *msg)
-{
- struct kvaser_msg *tmp;
- void *buf;
- int actual_len;
- int err;
- int pos;
- unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
-
- buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- do {
- err = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE, &actual_len,
- USB_RECV_TIMEOUT);
- if (err < 0)
- goto end;
-
- pos = 0;
- while (pos <= actual_len - MSG_HEADER_LEN) {
- tmp = buf + pos;
-
- /* Handle messages crossing the USB endpoint max packet
- * size boundary. Check kvaser_usb_read_bulk_callback()
- * for further details.
- */
- if (tmp->len == 0) {
- pos = round_up(pos, le16_to_cpu(dev->bulk_in->
- wMaxPacketSize));
- continue;
- }
-
- if (pos + tmp->len > actual_len) {
- dev_err_ratelimited(dev->udev->dev.parent,
- "Format error\n");
- break;
- }
-
- if (tmp->id == id) {
- memcpy(msg, tmp, tmp->len);
- goto end;
- }
-
- pos += tmp->len;
- }
- } while (time_before(jiffies, to));
-
- err = -EINVAL;
-
-end:
- kfree(buf);
-
- return err;
-}
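
The loop above polls the bulk-in pipe until a reply with the requested id arrives, skipping unrelated traffic that shares the pipe. A minimal userspace sketch of that reply-matching scan, assuming a simplified two-byte header (len, id) rather than the real kvaser_msg layout:

#include <stdint.h>
#include <string.h>

struct sketch_msg {
        uint8_t len;    /* total length, header included */
        uint8_t id;     /* command id */
        uint8_t data[30];
};

/* Scan one received buffer and copy out the first message matching
 * wanted_id. Zero-length padding and truncated tails end the scan,
 * mirroring the checks in kvaser_usb_wait_msg() above.
 */
static int sketch_find_reply(const uint8_t *buf, int actual_len,
                             uint8_t wanted_id, struct sketch_msg *out)
{
        int pos = 0;

        while (pos <= actual_len - 2) {         /* 2 == header length */
                const struct sketch_msg *msg =
                        (const struct sketch_msg *)(buf + pos);

                if (msg->len == 0 || msg->len > sizeof(*out) ||
                    pos + msg->len > actual_len)
                        break;  /* padding or truncated tail */
                if (msg->id == wanted_id) {
                        memcpy(out, msg, msg->len);
                        return 0;
                }
                pos += msg->len;
        }
        return -1;      /* caller keeps polling until its timeout */
}
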
-
-static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
- u8 msg_id, int channel)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = msg_id;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
- msg->u.simple.channel = channel;
- msg->u.simple.tid = 0xff;
-
- rc = kvaser_usb_send_msg(dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
-{
- struct kvaser_msg msg;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
- if (err)
- return err;
-
- err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg);
- if (err)
- return err;
-
- switch (dev->family) {
- case KVASER_LEAF:
- dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
- break;
- case KVASER_USBCAN:
- dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
- break;
- }
-
- return 0;
-}
-
-static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
-{
- struct kvaser_msg msg;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
- if (err)
- return err;
-
- err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg);
- if (err)
- return err;
-
- dev->nchannels = msg.u.cardinfo.nchannels;
- if ((dev->nchannels > MAX_NET_DEVICES) ||
- (dev->family == KVASER_USBCAN &&
- dev->nchannels > MAX_USBCAN_NET_DEVICES))
- return -EINVAL;
-
- return 0;
-}
-
-static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct net_device_stats *stats;
- struct kvaser_usb_tx_urb_context *context;
- struct kvaser_usb_net_priv *priv;
- struct sk_buff *skb;
- struct can_frame *cf;
- unsigned long flags;
- u8 channel, tid;
-
- channel = msg->u.tx_acknowledge_header.channel;
- tid = msg->u.tx_acknowledge_header.tid;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- if (!netif_device_present(priv->netdev))
- return;
-
- stats = &priv->netdev->stats;
-
- context = &priv->tx_contexts[tid % dev->max_tx_urbs];
-
- /* Sometimes the state change doesn't come after a bus-off event */
- if (priv->can.restart_ms &&
- (priv->can.state >= CAN_STATE_BUS_OFF)) {
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
- cf->can_id |= CAN_ERR_RESTARTED;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
- } else {
- netdev_err(priv->netdev,
- "No memory left for err_skb\n");
- }
-
- priv->can.can_stats.restarts++;
- netif_carrier_on(priv->netdev);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
-
- stats->tx_packets++;
- stats->tx_bytes += context->dlc;
-
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-
- can_get_echo_skb(priv->netdev, context->echo_index);
- context->echo_index = dev->max_tx_urbs;
- --priv->active_tx_contexts;
- netif_wake_queue(priv->netdev);
-
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-}
-
-static void kvaser_usb_simple_msg_callback(struct urb *urb)
-{
- struct net_device *netdev = urb->context;
-
- kfree(urb->transfer_buffer);
-
- if (urb->status)
- netdev_warn(netdev, "urb status received: %d\n",
- urb->status);
-}
-
-static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
- u8 msg_id)
-{
- struct kvaser_usb *dev = priv->dev;
- struct net_device *netdev = priv->netdev;
- struct kvaser_msg *msg;
- struct urb *urb;
- void *buf;
- int err;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
- if (!buf) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- msg = (struct kvaser_msg *)buf;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
- msg->id = msg_id;
- msg->u.simple.channel = priv->channel;
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- buf, msg->len,
- kvaser_usb_simple_msg_callback, netdev);
- usb_anchor_urb(urb, &priv->tx_submitted);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- netdev_err(netdev, "Error transmitting URB\n");
- usb_unanchor_urb(urb);
- kfree(buf);
- usb_free_urb(urb);
- return err;
- }
-
- usb_free_urb(urb);
-
- return 0;
-}
-
-static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
- const struct kvaser_usb_error_summary *es,
- struct can_frame *cf)
-{
- struct kvaser_usb *dev = priv->dev;
- struct net_device_stats *stats = &priv->netdev->stats;
- enum can_state cur_state, new_state, tx_state, rx_state;
-
- netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
-
- new_state = cur_state = priv->can.state;
-
- if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET))
- new_state = CAN_STATE_BUS_OFF;
- else if (es->status & M16C_STATE_BUS_PASSIVE)
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (es->status & M16C_STATE_BUS_ERROR) {
- /* Guard against spurious error events after a bus-off */
- if (cur_state < CAN_STATE_BUS_OFF) {
- if ((es->txerr >= 128) || (es->rxerr >= 128))
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if ((es->txerr >= 96) || (es->rxerr >= 96))
- new_state = CAN_STATE_ERROR_WARNING;
- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
- }
-
- if (!es->status)
- new_state = CAN_STATE_ERROR_ACTIVE;
-
- if (new_state != cur_state) {
- tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
- rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
-
- can_change_state(priv->netdev, cf, tx_state, rx_state);
- }
-
- if (priv->can.restart_ms &&
- (cur_state >= CAN_STATE_BUS_OFF) &&
- (new_state < CAN_STATE_BUS_OFF)) {
- priv->can.can_stats.restarts++;
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- if (es->leaf.error_factor) {
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- }
- break;
- case KVASER_USBCAN:
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
- stats->tx_errors++;
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
- stats->rx_errors++;
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
- priv->can.can_stats.bus_error++;
- }
- break;
- }
-
- priv->bec.txerr = es->txerr;
- priv->bec.rxerr = es->rxerr;
-}
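
The 96 and 128 thresholds above follow the CAN 2.0 fault confinement rules: a node becomes error-warning once a counter reaches 96 and error-passive at 128, while bus-off is signalled by the firmware status bits rather than derived from the counters. The mapping in isolation, as a sketch:

enum sketch_can_state {
        SKETCH_ERROR_ACTIVE,    /* both counters below 96 */
        SKETCH_ERROR_WARNING,   /* a counter reached 96 */
        SKETCH_ERROR_PASSIVE,   /* a counter reached 128 */
};

static enum sketch_can_state sketch_state_from_counters(unsigned int txerr,
                                                        unsigned int rxerr)
{
        if (txerr >= 128 || rxerr >= 128)
                return SKETCH_ERROR_PASSIVE;
        if (txerr >= 96 || rxerr >= 96)
                return SKETCH_ERROR_WARNING;
        return SKETCH_ERROR_ACTIVE;
}
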
-
-static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_usb_error_summary *es)
-{
- struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC };
- struct sk_buff *skb;
- struct net_device_stats *stats;
- struct kvaser_usb_net_priv *priv;
- enum can_state old_state, new_state;
-
- if (es->channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", es->channel);
- return;
- }
-
- priv = dev->nets[es->channel];
- stats = &priv->netdev->stats;
-
- /* Update all of the can interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error can frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * can interface's state and counters, and the setting up of can error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
- old_state = priv->can.state;
- kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf);
- new_state = priv->can.state;
-
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
- if (new_state != old_state) {
- if (es->status &
- (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
- if (!priv->can.restart_ms)
- kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
- netif_carrier_off(priv->netdev);
- }
-
- if (priv->can.restart_ms &&
- (old_state >= CAN_STATE_BUS_OFF) &&
- (new_state < CAN_STATE_BUS_OFF)) {
- cf->can_id |= CAN_ERR_RESTARTED;
- netif_carrier_on(priv->netdev);
- }
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- if (es->leaf.error_factor) {
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-
- if (es->leaf.error_factor & M16C_EF_ACKE)
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
- if (es->leaf.error_factor & M16C_EF_CRCE)
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- if (es->leaf.error_factor & M16C_EF_FORME)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- if (es->leaf.error_factor & M16C_EF_STFE)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- if (es->leaf.error_factor & M16C_EF_BITE0)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
- if (es->leaf.error_factor & M16C_EF_BITE1)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
- if (es->leaf.error_factor & M16C_EF_TRE)
- cf->data[2] |= CAN_ERR_PROT_TX;
- }
- break;
- case KVASER_USBCAN:
- if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
- cf->can_id |= CAN_ERR_BUSERROR;
- }
- break;
- }
-
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
-}
-
-/* For USBCAN, report an error to userspace iff the channel's error
- * counters have changed, or we're the only channel seeing a bus error state.
- */
-static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
- struct kvaser_usb_error_summary *es)
-{
- struct kvaser_usb_net_priv *priv;
- int channel;
- bool report_error;
-
- channel = es->channel;
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
- report_error = false;
-
- if (es->txerr != priv->bec.txerr) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
- report_error = true;
- }
- if (es->rxerr != priv->bec.rxerr) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
- report_error = true;
- }
- if ((es->status & M16C_STATE_BUS_ERROR) &&
- !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
- es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
- report_error = true;
- }
-
- if (report_error)
- kvaser_usb_rx_error(dev, es);
-}
-
-static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_error_summary es = { };
-
- switch (msg->id) {
- /* Sometimes errors are sent as unsolicited chip state events */
- case CMD_CHIP_STATE_EVENT:
- es.channel = msg->u.usbcan.chip_state_event.channel;
- es.status = msg->u.usbcan.chip_state_event.status;
- es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count;
- es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
- break;
-
- case CMD_CAN_ERROR_EVENT:
- es.channel = 0;
- es.status = msg->u.usbcan.error_event.status_ch0;
- es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0;
- es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0;
- es.usbcan.other_ch_status =
- msg->u.usbcan.error_event.status_ch1;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
-
- /* The USBCAN firmware supports up to 2 channels.
- * Now that ch0 was checked, check if ch1 has any errors.
- */
- if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
- es.channel = 1;
- es.status = msg->u.usbcan.error_event.status_ch1;
- es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1;
- es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1;
- es.usbcan.other_ch_status =
- msg->u.usbcan.error_event.status_ch0;
- kvaser_usbcan_conditionally_rx_error(dev, &es);
- }
- break;
-
- default:
- dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
- msg->id);
- }
-}
-
-static void kvaser_leaf_rx_error(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_error_summary es = { };
-
- switch (msg->id) {
- case CMD_CAN_ERROR_EVENT:
- es.channel = msg->u.leaf.error_event.channel;
- es.status = msg->u.leaf.error_event.status;
- es.txerr = msg->u.leaf.error_event.tx_errors_count;
- es.rxerr = msg->u.leaf.error_event.rx_errors_count;
- es.leaf.error_factor = msg->u.leaf.error_event.error_factor;
- break;
- case CMD_LEAF_LOG_MESSAGE:
- es.channel = msg->u.leaf.log_message.channel;
- es.status = msg->u.leaf.log_message.data[0];
- es.txerr = msg->u.leaf.log_message.data[2];
- es.rxerr = msg->u.leaf.log_message.data[3];
- es.leaf.error_factor = msg->u.leaf.log_message.data[1];
- break;
- case CMD_CHIP_STATE_EVENT:
- es.channel = msg->u.leaf.chip_state_event.channel;
- es.status = msg->u.leaf.chip_state_event.status;
- es.txerr = msg->u.leaf.chip_state_event.tx_errors_count;
- es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count;
- es.leaf.error_factor = 0;
- break;
- default:
- dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
- msg->id);
- return;
- }
-
- kvaser_usb_rx_error(dev, &es);
-}
-
-static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
- const struct kvaser_msg *msg)
-{
- struct can_frame *cf;
- struct sk_buff *skb;
- struct net_device_stats *stats = &priv->netdev->stats;
-
- if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
- MSG_FLAG_NERR)) {
- netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
- msg->u.rx_can_header.flag);
-
- stats->rx_errors++;
- return;
- }
-
- if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) {
- stats->rx_over_errors++;
- stats->rx_errors++;
-
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
- }
-}
-
-static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct net_device_stats *stats;
- u8 channel = msg->u.rx_can_header.channel;
- const u8 *rx_msg = NULL; /* silences a GCC maybe-uninitialized warning */
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
- stats = &priv->netdev->stats;
-
- if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) {
- kvaser_leaf_rx_error(dev, msg);
- return;
- } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
- MSG_FLAG_NERR |
- MSG_FLAG_OVERRUN)) {
- kvaser_usb_rx_can_err(priv, msg);
- return;
- } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
- netdev_warn(priv->netdev,
- "Unhandled frame (flags: 0x%02x)",
- msg->u.rx_can_header.flag);
- return;
- }
-
- switch (dev->family) {
- case KVASER_LEAF:
- rx_msg = msg->u.leaf.rx_can.msg;
- break;
- case KVASER_USBCAN:
- rx_msg = msg->u.usbcan.rx_can.msg;
- break;
- }
-
- skb = alloc_can_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
-
- if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) {
- cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id);
- if (cf->can_id & KVASER_EXTENDED_FRAME)
- cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
- else
- cf->can_id &= CAN_SFF_MASK;
-
- cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc);
-
- if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &msg->u.leaf.log_message.data,
- cf->can_dlc);
- } else {
- cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f);
-
- if (msg->id == CMD_RX_EXT_MESSAGE) {
- cf->can_id <<= 18;
- cf->can_id |= ((rx_msg[2] & 0x0f) << 14) |
- ((rx_msg[3] & 0xff) << 6) |
- (rx_msg[4] & 0x3f);
- cf->can_id |= CAN_EFF_FLAG;
- }
-
- cf->can_dlc = get_can_dlc(rx_msg[5]);
-
- if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
- cf->can_id |= CAN_RTR_FLAG;
- else
- memcpy(cf->data, &rx_msg[6],
- cf->can_dlc);
- }
-
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
-}
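
The identifier decode above reflects how the firmware packs a CAN id into 6-bit groups, with the five most significant bits in the first byte. A standalone sketch of the same layout; for example, the bytes {0x04, 0x33} decode to the standard id 0x133:

#include <stdint.h>

static uint32_t sketch_decode_std_id(const uint8_t *m)
{
        return ((m[0] & 0x1f) << 6) | (m[1] & 0x3f);    /* 11 bits */
}

static uint32_t sketch_decode_ext_id(const uint8_t *m)
{
        uint32_t id = sketch_decode_std_id(m) << 18;    /* top 11 bits */

        id |= (m[2] & 0x0f) << 14;
        id |= m[3] << 6;        /* a full byte in this group */
        id |= m[4] & 0x3f;
        return id;              /* 11 + 4 + 8 + 6 = 29 bits */
}
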
-
-static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- u8 channel = msg->u.simple.channel;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- if (completion_done(&priv->start_comp) &&
- netif_queue_stopped(priv->netdev)) {
- netif_wake_queue(priv->netdev);
- } else {
- netif_start_queue(priv->netdev);
- complete(&priv->start_comp);
- }
-}
-
-static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- struct kvaser_usb_net_priv *priv;
- u8 channel = msg->u.simple.channel;
-
- if (channel >= dev->nchannels) {
- dev_err(dev->udev->dev.parent,
- "Invalid channel number (%d)\n", channel);
- return;
- }
-
- priv = dev->nets[channel];
-
- complete(&priv->stop_comp);
-}
-
-static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
- const struct kvaser_msg *msg)
-{
- switch (msg->id) {
- case CMD_START_CHIP_REPLY:
- kvaser_usb_start_chip_reply(dev, msg);
- break;
-
- case CMD_STOP_CHIP_REPLY:
- kvaser_usb_stop_chip_reply(dev, msg);
- break;
-
- case CMD_RX_STD_MESSAGE:
- case CMD_RX_EXT_MESSAGE:
- kvaser_usb_rx_can_msg(dev, msg);
- break;
-
- case CMD_LEAF_LOG_MESSAGE:
- if (dev->family != KVASER_LEAF)
- goto warn;
- kvaser_usb_rx_can_msg(dev, msg);
- break;
-
- case CMD_CHIP_STATE_EVENT:
- case CMD_CAN_ERROR_EVENT:
- if (dev->family == KVASER_LEAF)
- kvaser_leaf_rx_error(dev, msg);
- else
- kvaser_usbcan_rx_error(dev, msg);
- break;
-
- case CMD_TX_ACKNOWLEDGE:
- kvaser_usb_tx_acknowledge(dev, msg);
- break;
-
- /* Ignored messages */
- case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->family != KVASER_USBCAN)
- goto warn;
- break;
-
- case CMD_FLUSH_QUEUE_REPLY:
- if (dev->family != KVASER_LEAF)
- goto warn;
- break;
-
- default:
-warn: dev_warn(dev->udev->dev.parent,
- "Unhandled message (%d)\n", msg->id);
- break;
- }
-}
-
-static void kvaser_usb_read_bulk_callback(struct urb *urb)
-{
- struct kvaser_usb *dev = urb->context;
- struct kvaser_msg *msg;
- int pos = 0;
- int err, i;
-
- switch (urb->status) {
- case 0:
- break;
- case -ENOENT:
- case -EPIPE:
- case -EPROTO:
- case -ESHUTDOWN:
- return;
- default:
- dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
- urb->status);
- goto resubmit_urb;
- }
-
- while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
- msg = urb->transfer_buffer + pos;
-
- /* The Kvaser firmware can only read and write messages that
- * do not cross the USB endpoint's wMaxPacketSize boundary.
- * If a follow-up command would cross such a boundary, the
- * firmware puts a placeholder zero-length command in its place
- * and aligns the real command to the next max packet size.
- *
- * Handle such cases, or we're going to miss a significant
- * number of events under heavy rx load on the bus.
- */
- if (msg->len == 0) {
- pos = round_up(pos, le16_to_cpu(dev->bulk_in->
- wMaxPacketSize));
- continue;
- }
-
- if (pos + msg->len > urb->actual_length) {
- dev_err_ratelimited(dev->udev->dev.parent,
- "Format error\n");
- break;
- }
-
- kvaser_usb_handle_message(dev, msg);
- pos += msg->len;
- }
-
-resubmit_urb:
- usb_fill_bulk_urb(urb, dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- urb->transfer_buffer, RX_BUFFER_SIZE,
- kvaser_usb_read_bulk_callback, dev);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err == -ENODEV) {
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- netif_device_detach(dev->nets[i]->netdev);
- }
- } else if (err) {
- dev_err(dev->udev->dev.parent,
- "Failed resubmitting read bulk urb: %d\n", err);
- }
-
- return;
-}
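
The zero-length placeholder handling above advances the parser to the next wMaxPacketSize boundary instead of advancing by msg->len. A sketch of that alignment, equivalent to the kernel's round_up() here; with a 64-byte endpoint, a placeholder found at offset 20 resumes parsing at offset 64:

/* Round pos up to the next multiple of max_packet_size. */
static int sketch_align_pos(int pos, int max_packet_size)
{
        return ((pos + max_packet_size - 1) / max_packet_size) *
               max_packet_size;
}
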
-
-static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
-{
- int i, err = 0;
-
- if (dev->rxinitdone)
- return 0;
-
- for (i = 0; i < MAX_RX_URBS; i++) {
- struct urb *urb = NULL;
- u8 *buf = NULL;
- dma_addr_t buf_dma;
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- err = -ENOMEM;
- break;
- }
-
- buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
- GFP_KERNEL, &buf_dma);
- if (!buf) {
- dev_warn(dev->udev->dev.parent,
- "No memory left for USB buffer\n");
- usb_free_urb(urb);
- err = -ENOMEM;
- break;
- }
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE,
- kvaser_usb_read_bulk_callback,
- dev);
- urb->transfer_dma = buf_dma;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- usb_anchor_urb(urb, &dev->rx_submitted);
-
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
- buf_dma);
- usb_free_urb(urb);
- break;
- }
-
- dev->rxbuf[i] = buf;
- dev->rxbuf_dma[i] = buf_dma;
-
- usb_free_urb(urb);
- }
-
- if (i == 0) {
- dev_warn(dev->udev->dev.parent,
- "Cannot setup read URBs, error %d\n", err);
- return err;
- } else if (i < MAX_RX_URBS) {
- dev_warn(dev->udev->dev.parent,
- "RX performances may be slow\n");
- }
-
- dev->rxinitdone = true;
-
- return 0;
-}
-
-static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_SET_CTRL_MODE;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
- msg->u.ctrl_mode.tid = 0xff;
- msg->u.ctrl_mode.channel = priv->channel;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
- else
- msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
-
- rc = kvaser_usb_send_msg(priv->dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
-{
- int err;
-
- init_completion(&priv->start_comp);
-
- err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
- priv->channel);
- if (err)
- return err;
-
- if (!wait_for_completion_timeout(&priv->start_comp,
- msecs_to_jiffies(START_TIMEOUT)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int kvaser_usb_open(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- int err;
-
- err = open_candev(netdev);
- if (err)
- return err;
-
- err = kvaser_usb_setup_rx_urbs(dev);
- if (err)
- goto error;
-
- err = kvaser_usb_set_opt_mode(priv);
- if (err)
- goto error;
-
- err = kvaser_usb_start_chip(priv);
- if (err) {
- netdev_warn(netdev, "Cannot start device, error %d\n", err);
- goto error;
- }
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- return 0;
-
-error:
- close_candev(netdev);
- return err;
-}
-
-static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
-{
- int i, max_tx_urbs;
-
- max_tx_urbs = priv->dev->max_tx_urbs;
-
- priv->active_tx_contexts = 0;
- for (i = 0; i < max_tx_urbs; i++)
- priv->tx_contexts[i].echo_index = max_tx_urbs;
-}
-
-/* This method might sleep. Do not call it in the atomic context
- * of URB completions.
- */
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
-{
- usb_kill_anchored_urbs(&priv->tx_submitted);
- kvaser_usb_reset_tx_urb_contexts(priv);
-}
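
Resetting every echo_index to max_tx_urbs re-arms the sentinel convention used throughout the driver: a context whose echo_index equals max_tx_urbs is free. The transmit path claims the first such slot under tx_contexts_lock, as sketched here:

struct sketch_tx_ctx {
        unsigned int echo_index;
};

/* Return the claimed slot index, or -1 if every context is busy
 * (in which case the queue should already have been stopped).
 */
static int sketch_claim_tx_ctx(struct sketch_tx_ctx *ctx,
                               unsigned int max_tx_urbs)
{
        unsigned int i;

        for (i = 0; i < max_tx_urbs; i++) {
                if (ctx[i].echo_index == max_tx_urbs) {
                        ctx[i].echo_index = i;  /* claim the slot */
                        return i;
                }
        }
        return -1;
}
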
-
-static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
-{
- int i;
-
- usb_kill_anchored_urbs(&dev->rx_submitted);
-
- for (i = 0; i < MAX_RX_URBS; i++)
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
- dev->rxbuf[i],
- dev->rxbuf_dma[i]);
-
- for (i = 0; i < dev->nchannels; i++) {
- struct kvaser_usb_net_priv *priv = dev->nets[i];
-
- if (priv)
- kvaser_usb_unlink_tx_urbs(priv);
- }
-}
-
-static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
-{
- int err;
-
- init_completion(&priv->stop_comp);
-
- err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
- priv->channel);
- if (err)
- return err;
-
- if (!wait_for_completion_timeout(&priv->stop_comp,
- msecs_to_jiffies(STOP_TIMEOUT)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
-{
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_FLUSH_QUEUE;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
- msg->u.flush_queue.channel = priv->channel;
- msg->u.flush_queue.flags = 0x00;
-
- rc = kvaser_usb_send_msg(priv->dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_close(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- int err;
-
- netif_stop_queue(netdev);
-
- err = kvaser_usb_flush_queue(priv);
- if (err)
- netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
-
- err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
- if (err)
- netdev_warn(netdev, "Cannot reset card, error %d\n", err);
-
- err = kvaser_usb_stop_chip(priv);
- if (err)
- netdev_warn(netdev, "Cannot stop device, error %d\n", err);
-
- /* reset tx contexts */
- kvaser_usb_unlink_tx_urbs(priv);
-
- priv->can.state = CAN_STATE_STOPPED;
- close_candev(priv->netdev);
-
- return 0;
-}
-
-static void kvaser_usb_write_bulk_callback(struct urb *urb)
-{
- struct kvaser_usb_tx_urb_context *context = urb->context;
- struct kvaser_usb_net_priv *priv;
- struct net_device *netdev;
-
- if (WARN_ON(!context))
- return;
-
- priv = context->priv;
- netdev = priv->netdev;
-
- kfree(urb->transfer_buffer);
-
- if (!netif_device_present(netdev))
- return;
-
- if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
-}
-
-static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct kvaser_usb *dev = priv->dev;
- struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- struct kvaser_usb_tx_urb_context *context = NULL;
- struct urb *urb;
- void *buf;
- struct kvaser_msg *msg;
- int i, err, ret = NETDEV_TX_OK;
- u8 *msg_tx_can_flags = NULL; /* silences a GCC maybe-uninitialized warning */
- unsigned long flags;
-
- if (can_dropped_invalid_skb(netdev, skb))
- return NETDEV_TX_OK;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
- if (!buf) {
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- goto freeurb;
- }
-
- msg = buf;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
- msg->u.tx_can.channel = priv->channel;
-
- switch (dev->family) {
- case KVASER_LEAF:
- msg_tx_can_flags = &msg->u.tx_can.leaf.flags;
- break;
- case KVASER_USBCAN:
- msg_tx_can_flags = &msg->u.tx_can.usbcan.flags;
- break;
- }
-
- *msg_tx_can_flags = 0;
-
- if (cf->can_id & CAN_EFF_FLAG) {
- msg->id = CMD_TX_EXT_MESSAGE;
- msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
- msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
- msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
- msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
- msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
- } else {
- msg->id = CMD_TX_STD_MESSAGE;
- msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
- msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
- }
-
- msg->u.tx_can.msg[5] = cf->can_dlc;
- memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
-
- if (cf->can_id & CAN_RTR_FLAG)
- *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
-
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- for (i = 0; i < dev->max_tx_urbs; i++) {
- if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
- context = &priv->tx_contexts[i];
-
- context->echo_index = i;
- can_put_echo_skb(skb, netdev, context->echo_index);
- ++priv->active_tx_contexts;
- if (priv->active_tx_contexts >= dev->max_tx_urbs)
- netif_stop_queue(netdev);
-
- break;
- }
- }
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-
- /* This should never happen; it implies a flow control bug */
- if (!context) {
- netdev_warn(netdev, "cannot find free context\n");
-
- kfree(buf);
- ret = NETDEV_TX_BUSY;
- goto freeurb;
- }
-
- context->priv = priv;
- context->dlc = cf->can_dlc;
-
- msg->u.tx_can.tid = context->echo_index;
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->bulk_out->bEndpointAddress),
- buf, msg->len,
- kvaser_usb_write_bulk_callback, context);
- usb_anchor_urb(urb, &priv->tx_submitted);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err)) {
- spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-
- can_free_echo_skb(netdev, context->echo_index);
- context->echo_index = dev->max_tx_urbs;
- --priv->active_tx_contexts;
- netif_wake_queue(netdev);
-
- spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
-
- usb_unanchor_urb(urb);
- kfree(buf);
-
- stats->tx_dropped++;
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "Failed tx_urb %d\n", err);
-
- goto freeurb;
- }
-
- ret = NETDEV_TX_OK;
-
-freeurb:
- usb_free_urb(urb);
- return ret;
-}
-
-static const struct net_device_ops kvaser_usb_netdev_ops = {
- .ndo_open = kvaser_usb_open,
- .ndo_stop = kvaser_usb_close,
- .ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct can_bittiming_const kvaser_usb_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
-};
-
-static int kvaser_usb_set_bittiming(struct net_device *netdev)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
- struct kvaser_usb *dev = priv->dev;
- struct kvaser_msg *msg;
- int rc;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->id = CMD_SET_BUS_PARAMS;
- msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
- msg->u.busparams.channel = priv->channel;
- msg->u.busparams.tid = 0xff;
- msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
- msg->u.busparams.sjw = bt->sjw;
- msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
- msg->u.busparams.tseg2 = bt->phase_seg2;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- msg->u.busparams.no_samp = 3;
- else
- msg->u.busparams.no_samp = 1;
-
- rc = kvaser_usb_send_msg(dev, msg);
-
- kfree(msg);
- return rc;
-}
-
-static int kvaser_usb_set_mode(struct net_device *netdev,
- enum can_mode mode)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- int err;
-
- switch (mode) {
- case CAN_MODE_START:
- err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
- if (err)
- return err;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
-{
- struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
-
- *bec = priv->bec;
-
- return 0;
-}
-
-static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
-{
- int i;
-
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- unregister_candev(dev->nets[i]->netdev);
- }
-
- kvaser_usb_unlink_all_urbs(dev);
-
- for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
- continue;
-
- free_candev(dev->nets[i]->netdev);
- }
-}
-
-static int kvaser_usb_init_one(struct usb_interface *intf,
- const struct usb_device_id *id, int channel)
-{
- struct kvaser_usb *dev = usb_get_intfdata(intf);
- struct net_device *netdev;
- struct kvaser_usb_net_priv *priv;
- int err;
-
- err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
- if (err)
- return err;
-
- netdev = alloc_candev(sizeof(*priv) +
- dev->max_tx_urbs * sizeof(*priv->tx_contexts),
- dev->max_tx_urbs);
- if (!netdev) {
- dev_err(&intf->dev, "Cannot alloc candev\n");
- return -ENOMEM;
- }
-
- priv = netdev_priv(netdev);
-
- init_usb_anchor(&priv->tx_submitted);
- init_completion(&priv->start_comp);
- init_completion(&priv->stop_comp);
-
- priv->dev = dev;
- priv->netdev = netdev;
- priv->channel = channel;
-
- spin_lock_init(&priv->tx_contexts_lock);
- kvaser_usb_reset_tx_urb_contexts(priv);
-
- priv->can.state = CAN_STATE_STOPPED;
- priv->can.clock.freq = CAN_USB_CLOCK;
- priv->can.bittiming_const = &kvaser_usb_bittiming_const;
- priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
- priv->can.do_set_mode = kvaser_usb_set_mode;
- if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
- priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
- if (id->driver_info & KVASER_HAS_SILENT_MODE)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
-
- netdev->flags |= IFF_ECHO;
-
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
- SET_NETDEV_DEV(netdev, &intf->dev);
- netdev->dev_id = channel;
-
- dev->nets[channel] = priv;
-
- err = register_candev(netdev);
- if (err) {
- dev_err(&intf->dev, "Failed to register can device\n");
- free_candev(netdev);
- dev->nets[channel] = NULL;
- return err;
- }
-
- netdev_dbg(netdev, "device registered\n");
-
- return 0;
-}
-
-static int kvaser_usb_get_endpoints(const struct usb_interface *intf,
- struct usb_endpoint_descriptor **in,
- struct usb_endpoint_descriptor **out)
-{
- const struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- int i;
-
- iface_desc = &intf->altsetting[0];
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (!*in && usb_endpoint_is_bulk_in(endpoint))
- *in = endpoint;
-
- if (!*out && usb_endpoint_is_bulk_out(endpoint))
- *out = endpoint;
-
- /* use the first bulk-in and first bulk-out endpoints */
- if (*in && *out)
- return 0;
- }
-
- return -ENODEV;
-}
-
-static int kvaser_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct kvaser_usb *dev;
- int err = -ENOMEM;
- int i, retry = 3;
-
- dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- if (kvaser_is_leaf(id)) {
- dev->family = KVASER_LEAF;
- } else if (kvaser_is_usbcan(id)) {
- dev->family = KVASER_USBCAN;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) does not belong to any known Kvaser USB family",
- id->idProduct);
- return -ENODEV;
- }
-
- err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
- if (err) {
- dev_err(&intf->dev, "Cannot get usb endpoint(s)");
- return err;
- }
-
- dev->udev = interface_to_usbdev(intf);
-
- init_usb_anchor(&dev->rx_submitted);
-
- usb_set_intfdata(intf, dev);
-
- /* On some x86 laptops, plugging a Kvaser device again after
- * an unplug makes the firmware always ignore the very first
- * command. For such a case, provide some room for retries
- * instead of completely exiting the driver.
- */
- do {
- err = kvaser_usb_get_software_info(dev);
- } while (--retry && err == -ETIMEDOUT);
-
- if (err) {
- dev_err(&intf->dev,
- "Cannot get software infos, error %d\n", err);
- return err;
- }
-
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
-
- dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
-
- err = kvaser_usb_get_card_info(dev);
- if (err) {
- dev_err(&intf->dev,
- "Cannot get card infos, error %d\n", err);
- return err;
- }
-
- for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(intf, id, i);
- if (err) {
- kvaser_usb_remove_interfaces(dev);
- return err;
- }
- }
-
- return 0;
-}
-
-static void kvaser_usb_disconnect(struct usb_interface *intf)
-{
- struct kvaser_usb *dev = usb_get_intfdata(intf);
-
- usb_set_intfdata(intf, NULL);
-
- if (!dev)
- return;
-
- kvaser_usb_remove_interfaces(dev);
-}
-
-static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
- .probe = kvaser_usb_probe,
- .disconnect = kvaser_usb_disconnect,
- .id_table = kvaser_usb_table,
-};
-
-module_usb_driver(kvaser_usb_driver);
-
-MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
-MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
new file mode 100644
index 000000000000..9f41ddab6a5a
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
+kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
new file mode 100644
index 000000000000..390b6bde883c
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ * - Kvaser linux mhydra driver (version 5.24)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#ifndef KVASER_USB_H
+#define KVASER_USB_H
+
+/* Kvaser USB CAN dongles are divided into three major platforms:
+ * - Hydra: Running firmware labeled as 'mhydra'
+ * - Leaf: Based on Renesas M32C or Freescale i.MX28, running firmware labeled
+ * as 'filo'
+ * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
+ */
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#define KVASER_USB_MAX_RX_URBS 4
+#define KVASER_USB_MAX_TX_URBS 128
+#define KVASER_USB_TIMEOUT 1000 /* msecs */
+#define KVASER_USB_RX_BUFFER_SIZE 3072
+#define KVASER_USB_MAX_NET_DEVICES 5
+
+/* USB devices features */
+#define KVASER_USB_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+
+/* Device capabilities */
+#define KVASER_USB_CAP_BERR_CAP 0x01
+#define KVASER_USB_CAP_EXT_CAP 0x02
+#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
+
+struct kvaser_usb_dev_cfg;
+
+enum kvaser_usb_leaf_family {
+ KVASER_LEAF,
+ KVASER_USBCAN,
+};
+
+#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
+struct kvaser_usb_dev_card_data_hydra {
+ u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
+ u8 sysdbg_he;
+ spinlock_t transid_lock; /* lock for transid */
+ u16 transid;
+ /* lock for usb_rx_leftover and usb_rx_leftover_len */
+ spinlock_t usb_rx_leftover_lock;
+ u8 usb_rx_leftover[KVASER_USB_HYDRA_MAX_CMD_LEN];
+ u8 usb_rx_leftover_len;
+};
+struct kvaser_usb_dev_card_data {
+ u32 ctrlmode_supported;
+ u32 capabilities;
+ union {
+ struct {
+ enum kvaser_usb_leaf_family family;
+ } leaf;
+ struct kvaser_usb_dev_card_data_hydra hydra;
+ };
+};
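
The usb_rx_leftover buffer above exists because a Hydra command may be split across two bulk-in transfers. A hypothetical sketch of how such stitching can work; the real handling lives in kvaser_usb_hydra.c, which is outside this hunk, so the names and the length-prefixed framing here are illustrative assumptions only:

#include <stdint.h>
#include <string.h>

#define SKETCH_MAX_CMD_LEN      128
#define SKETCH_RX_BUFFER_SIZE   3072

struct sketch_leftover {
        uint8_t buf[SKETCH_MAX_CMD_LEN];
        uint8_t len;
};

/* Feed one bulk-in transfer. Complete, length-prefixed commands are
 * handed to process(); a trailing partial command is stashed for the
 * next transfer.
 */
static void sketch_feed(struct sketch_leftover *lo, const uint8_t *buf,
                        int len, void (*process)(const uint8_t *cmd))
{
        uint8_t work[SKETCH_MAX_CMD_LEN + SKETCH_RX_BUFFER_SIZE];
        int total, pos = 0;

        memcpy(work, lo->buf, lo->len);         /* prepend old tail */
        memcpy(work + lo->len, buf, len);
        total = lo->len + len;
        lo->len = 0;

        while (pos < total) {
                uint8_t cmd_len = work[pos];    /* length-prefixed */

                if (cmd_len == 0 || cmd_len > SKETCH_MAX_CMD_LEN)
                        break;                  /* padding or corrupt */
                if (pos + cmd_len > total) {
                        lo->len = total - pos;  /* partial: stash tail */
                        memcpy(lo->buf, work + pos, lo->len);
                        break;
                }
                process(work + pos);
                pos += cmd_len;
        }
}
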
+
+/* Context for an outstanding, not yet ACKed, transmission */
+struct kvaser_usb_tx_urb_context {
+ struct kvaser_usb_net_priv *priv;
+ u32 echo_index;
+ int dlc;
+};
+
+struct kvaser_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
+ const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_dev_cfg *cfg;
+
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_anchor rx_submitted;
+
+ /* @max_tx_urbs: Firmware-reported maximum number of outstanding,
+ * not yet ACKed, transmissions on this device. This value is
+ * also used as a sentinel for marking free tx contexts.
+ */
+ u32 fw_version;
+ unsigned int nchannels;
+ unsigned int max_tx_urbs;
+ struct kvaser_usb_dev_card_data card_data;
+
+ bool rxinitdone;
+ void *rxbuf[KVASER_USB_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[KVASER_USB_MAX_RX_URBS];
+};
+
+struct kvaser_usb_net_priv {
+ struct can_priv can;
+ struct can_berr_counter bec;
+
+ struct kvaser_usb *dev;
+ struct net_device *netdev;
+ int channel;
+
+ struct completion start_comp, stop_comp, flush_comp;
+ struct usb_anchor tx_submitted;
+
+ spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
+ int active_tx_contexts;
+ struct kvaser_usb_tx_urb_context tx_contexts[];
+};
+
+/**
+ * struct kvaser_usb_dev_ops - Device specific functions
+ * @dev_set_mode: used for can.do_set_mode
+ * @dev_set_bittiming: used for can.do_set_bittiming
+ * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_berr_counter: used for can.do_get_berr_counter
+ *
+ * @dev_setup_endpoints: setup USB in and out endpoints
+ * @dev_init_card: initialize card
+ * @dev_get_software_info: get software info
+ * @dev_get_software_details: get software details
+ * @dev_get_card_info: get card info
+ * @dev_get_capabilities: discover device capabilities
+ *
+ * @dev_set_opt_mode: set ctrlmode
+ * @dev_start_chip: start the CAN controller
+ * @dev_stop_chip: stop the CAN controller
+ * @dev_reset_chip: reset the CAN controller
+ * @dev_flush_queue: flush outstanding CAN messages
+ * @dev_read_bulk_callback: handle incoming commands
+ * @dev_frame_to_cmd: translate struct can_frame into device command
+ */
+struct kvaser_usb_dev_ops {
+ int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
+ int (*dev_set_bittiming)(struct net_device *netdev);
+ int (*dev_set_data_bittiming)(struct net_device *netdev);
+ int (*dev_get_berr_counter)(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+ int (*dev_setup_endpoints)(struct kvaser_usb *dev);
+ int (*dev_init_card)(struct kvaser_usb *dev);
+ int (*dev_get_software_info)(struct kvaser_usb *dev);
+ int (*dev_get_software_details)(struct kvaser_usb *dev);
+ int (*dev_get_card_info)(struct kvaser_usb *dev);
+ int (*dev_get_capabilities)(struct kvaser_usb *dev);
+ int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
+ int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
+ int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
+ int (*dev_reset_chip)(struct kvaser_usb *dev, int channel);
+ int (*dev_flush_queue)(struct kvaser_usb_net_priv *priv);
+ void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
+ int len);
+ void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid);
+};
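
Each backend exports one filled instance of this table (kvaser_usb_leaf_dev_ops and kvaser_usb_hydra_dev_ops, declared further down). A hypothetical sketch of the wiring; the function name is a placeholder, not an actual kvaser_usb_leaf.c symbol:

static int sketch_start_chip(struct kvaser_usb_net_priv *priv)
{
        return 0;       /* placeholder body */
}

const struct kvaser_usb_dev_ops sketch_dev_ops = {
        .dev_start_chip = sketch_start_chip,
        /* the remaining callbacks are assigned the same way; anything
         * left NULL, such as an optional dev_reset_chip, must be
         * checked by the core before the call, as kvaser_usb_close()
         * does below.
         */
};
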
+
+struct kvaser_usb_dev_cfg {
+ const struct can_clock clock;
+ const unsigned int timestamp_freq;
+ const struct can_bittiming_const * const bittiming_const;
+ const struct can_bittiming_const * const data_bittiming_const;
+};
+
+extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
+extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+
+int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
+ int *actual_len);
+
+int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len);
+
+int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ int len);
+
+int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
new file mode 100644
index 000000000000..b939a4c10b84
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ * - Kvaser linux mhydra driver (version 5.24)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/if.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Kvaser USB vendor id. */
+#define KVASER_VENDOR_ID 0x0bfd
+
+/* Kvaser Leaf USB devices product ids */
+#define USB_LEAF_DEVEL_PRODUCT_ID 10
+#define USB_LEAF_LITE_PRODUCT_ID 11
+#define USB_LEAF_PRO_PRODUCT_ID 12
+#define USB_LEAF_SPRO_PRODUCT_ID 14
+#define USB_LEAF_PRO_LS_PRODUCT_ID 15
+#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
+#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
+#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
+#define USB_MEMO2_DEVEL_PRODUCT_ID 22
+#define USB_MEMO2_HSHS_PRODUCT_ID 23
+#define USB_UPRO_HSHS_PRODUCT_ID 24
+#define USB_LEAF_LITE_GI_PRODUCT_ID 25
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
+#define USB_MEMO2_HSLS_PRODUCT_ID 27
+#define USB_LEAF_LITE_CH_PRODUCT_ID 28
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
+#define USB_OEM_MERCURY_PRODUCT_ID 34
+#define USB_OEM_LEAF_PRODUCT_ID 35
+#define USB_CAN_R_PRODUCT_ID 39
+#define USB_LEAF_LITE_V2_PRODUCT_ID 288
+#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
+
+/* Kvaser USBCan-II devices product ids */
+#define USB_USBCAN_REVB_PRODUCT_ID 2
+#define USB_VCI2_PRODUCT_ID 3
+#define USB_USBCAN2_PRODUCT_ID 4
+#define USB_MEMORATOR_PRODUCT_ID 5
+
+/* Kvaser Minihydra USB devices product ids */
+#define USB_BLACKBIRD_V2_PRODUCT_ID 258
+#define USB_MEMO_PRO_5HS_PRODUCT_ID 260
+#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261
+#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262
+#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263
+#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
+#define USB_MEMO_2HS_PRODUCT_ID 265
+#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
+#define USB_HYBRID_CANLIN_PRODUCT_ID 267
+#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
+#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
+#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
+
+static inline bool kvaser_is_leaf(const struct usb_device_id *id)
+{
+ return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
+ id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
+ (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID);
+}
+
+static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
+ id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
+}
+
+static inline bool kvaser_is_hydra(const struct usb_device_id *id)
+{
+ return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
+ id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID;
+}
+
+static const struct usb_device_id kvaser_usb_table[] = {
+ /* Leaf USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
+ KVASER_USB_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
+
+ /* USBCANII USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
+ .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+
+ /* Minihydra USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+
+int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
+{
+ int actual_len; /* Not used */
+
+ return usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+}
+
+int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
+ int *actual_len)
+{
+ return usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ cmd, len, actual_len, KVASER_USB_TIMEOUT);
+}
+
+static void kvaser_usb_send_cmd_callback(struct urb *urb)
+{
+ struct net_device *netdev = urb->context;
+
+ kfree(urb->transfer_buffer);
+
+ if (urb->status)
+ netdev_warn(netdev, "urb status received: %d\n", urb->status);
+}
+
+int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ int len)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device *netdev = priv->netdev;
+ struct urb *urb;
+ int err;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ cmd, len, kvaser_usb_send_cmd_callback, netdev);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ netdev_err(netdev, "Error transmitting URB\n");
+ usb_unanchor_urb(urb);
+ }
+ usb_free_urb(urb);
+
+ return err;
+}
+
+int kvaser_usb_can_rx_over_error(struct net_device *netdev)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return -ENOMEM;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void kvaser_usb_read_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb *dev = urb->context;
+ int err;
+ unsigned int i;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ return;
+ default:
+ dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status);
+ goto resubmit_urb;
+ }
+
+ dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback, dev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ netif_device_detach(dev->nets[i]->netdev);
+ }
+ } else if (err) {
+ dev_err(&dev->intf->dev,
+ "Failed resubmitting read bulk urb: %d\n", err);
+ }
+}
+
+static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
+{
+ int i, err = 0;
+
+ if (dev->rxinitdone)
+ return 0;
+
+ for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf = NULL;
+ dma_addr_t buf_dma;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
+ GFP_KERNEL, &buf_dma);
+ if (!buf) {
+ dev_warn(&dev->intf->dev,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe
+ (dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, KVASER_USB_RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback, dev);
+ urb->transfer_dma = buf_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ KVASER_USB_RX_BUFFER_SIZE, buf,
+ buf_dma);
+ usb_free_urb(urb);
+ break;
+ }
+
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
+ usb_free_urb(urb);
+ }
+
+ if (i == 0) {
+ dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n",
+ err);
+ return err;
+ } else if (i < KVASER_USB_MAX_RX_URBS) {
+ dev_warn(&dev->intf->dev, "RX performances may be slow\n");
+ }
+
+ dev->rxinitdone = true;
+
+ return 0;
+}
+
+static int kvaser_usb_open(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(dev);
+ if (err)
+ goto error;
+
+ err = dev->ops->dev_set_opt_mode(priv);
+ if (err)
+ goto error;
+
+ err = dev->ops->dev_start_chip(priv);
+ if (err) {
+ netdev_warn(netdev, "Cannot start device, error %d\n", err);
+ goto error;
+ }
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+error:
+ close_candev(netdev);
+ return err;
+}
+
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+ int i, max_tx_urbs;
+
+ max_tx_urbs = priv->dev->max_tx_urbs;
+
+ priv->active_tx_contexts = 0;
+ for (i = 0; i < max_tx_urbs; i++)
+ priv->tx_contexts[i].echo_index = max_tx_urbs;
+}
+
+/* This method might sleep. Do not call it in the atomic context
+ * of URB completions.
+ */
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+}
+
+static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
+{
+ int i;
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++)
+ usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
+ for (i = 0; i < dev->nchannels; i++) {
+ struct kvaser_usb_net_priv *priv = dev->nets[i];
+
+ if (priv)
+ kvaser_usb_unlink_tx_urbs(priv);
+ }
+}
+
+static int kvaser_usb_close(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ netif_stop_queue(netdev);
+
+ err = dev->ops->dev_flush_queue(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+
+ if (dev->ops->dev_reset_chip) {
+ err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (err)
+ netdev_warn(netdev, "Cannot reset card, error %d\n",
+ err);
+ }
+
+ err = dev->ops->dev_stop_chip(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+
+ /* reset tx contexts */
+ kvaser_usb_unlink_tx_urbs(priv);
+
+ priv->can.state = CAN_STATE_STOPPED;
+ close_candev(priv->netdev);
+
+ return 0;
+}
+
+static void kvaser_usb_write_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb_tx_urb_context *context = urb->context;
+ struct kvaser_usb_net_priv *priv;
+ struct net_device *netdev;
+
+ if (WARN_ON(!context))
+ return;
+
+ priv = context->priv;
+ netdev = priv->netdev;
+
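+ /* The transfer buffer is the command built by dev_frame_to_cmd() in
+ * kvaser_usb_start_xmit(); free it regardless of the URB status.
+ */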
+ kfree(urb->transfer_buffer);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+}
+
+static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct kvaser_usb_tx_urb_context *context = NULL;
+ struct urb *urb;
+ void *buf;
+ int cmd_len = 0;
+ int err, ret = NETDEV_TX_OK;
+ unsigned int i;
+ unsigned long flags;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ for (i = 0; i < dev->max_tx_urbs; i++) {
+ if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
+ context = &priv->tx_contexts[i];
+
+ context->echo_index = i;
+ can_put_echo_skb(skb, netdev, context->echo_index);
+ ++priv->active_tx_contexts;
+ if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
+ netif_stop_queue(netdev);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
+ /* This should never happen; it implies a flow control bug */
+ if (!context) {
+ netdev_warn(netdev, "cannot find free context\n");
+
+ ret = NETDEV_TX_BUSY;
+ goto freeurb;
+ }
+
+ buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
+ context->echo_index);
+ if (!buf) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_free_echo_skb(netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+ goto freeurb;
+ }
+
+ context->priv = priv;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ buf, cmd_len, kvaser_usb_write_bulk_callback,
+ context);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_free_echo_skb(netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+
+ usb_unanchor_urb(urb);
+ kfree(buf);
+
+ stats->tx_dropped++;
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "Failed tx_urb %d\n", err);
+
+ goto freeurb;
+ }
+
+ ret = NETDEV_TX_OK;
+
+freeurb:
+ usb_free_urb(urb);
+ return ret;
+}
+
+static const struct net_device_ops kvaser_usb_netdev_ops = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ unregister_candev(dev->nets[i]->netdev);
+ }
+
+ kvaser_usb_unlink_all_urbs(dev);
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ free_candev(dev->nets[i]->netdev);
+ }
+}
+
+static int kvaser_usb_init_one(struct kvaser_usb *dev,
+ const struct usb_device_id *id, int channel)
+{
+ struct net_device *netdev;
+ struct kvaser_usb_net_priv *priv;
+ int err;
+
+ if (dev->ops->dev_reset_chip) {
+ err = dev->ops->dev_reset_chip(dev, channel);
+ if (err)
+ return err;
+ }
+
+ netdev = alloc_candev(sizeof(*priv) +
+ dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ dev->max_tx_urbs);
+ if (!netdev) {
+ dev_err(&dev->intf->dev, "Cannot alloc candev\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(netdev);
+
+ init_usb_anchor(&priv->tx_submitted);
+ init_completion(&priv->start_comp);
+ init_completion(&priv->stop_comp);
+ priv->can.ctrlmode_supported = 0;
+
+ priv->dev = dev;
+ priv->netdev = netdev;
+ priv->channel = channel;
+
+ spin_lock_init(&priv->tx_contexts_lock);
+ kvaser_usb_reset_tx_urb_contexts(priv);
+
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = dev->cfg->clock.freq;
+ priv->can.bittiming_const = dev->cfg->bittiming_const;
+ priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
+ priv->can.do_set_mode = dev->ops->dev_set_mode;
+ if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
+ priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
+ if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+ priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
+
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.do_set_data_bittiming =
+ dev->ops->dev_set_data_bittiming;
+ }
+
+ netdev->flags |= IFF_ECHO;
+
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+
+ SET_NETDEV_DEV(netdev, &dev->intf->dev);
+ netdev->dev_id = channel;
+
+ dev->nets[channel] = priv;
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register CAN device\n");
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
+ }
+
+ netdev_dbg(netdev, "device registered\n");
+
+ return 0;
+}
+
+static int kvaser_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct kvaser_usb *dev;
+ int err;
+ int i;
+
+ dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ if (kvaser_is_leaf(id)) {
+ dev->card_data.leaf.family = KVASER_LEAF;
+ dev->ops = &kvaser_usb_leaf_dev_ops;
+ } else if (kvaser_is_usbcan(id)) {
+ dev->card_data.leaf.family = KVASER_USBCAN;
+ dev->ops = &kvaser_usb_leaf_dev_ops;
+ } else if (kvaser_is_hydra(id)) {
+ dev->ops = &kvaser_usb_hydra_dev_ops;
+ } else {
+ dev_err(&intf->dev,
+ "Product ID (%d) is not a supported Kvaser USB device\n",
+ id->idProduct);
+ return -ENODEV;
+ }
+
+ dev->intf = intf;
+
+ err = dev->ops->dev_setup_endpoints(dev);
+ if (err) {
+ dev_err(&intf->dev, "Cannot get usb endpoint(s)");
+ return err;
+ }
+
+ dev->udev = interface_to_usbdev(intf);
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ usb_set_intfdata(intf, dev);
+
+ dev->card_data.ctrlmode_supported = 0;
+ dev->card_data.capabilities = 0;
+ err = dev->ops->dev_init_card(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Failed to initialize card, error %d\n", err);
+ return err;
+ }
+
+ err = dev->ops->dev_get_software_info(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get software info, error %d\n", err);
+ return err;
+ }
+
+ if (dev->ops->dev_get_software_details) {
+ err = dev->ops->dev_get_software_details(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get software details, error %d\n", err);
+ return err;
+ }
+ }
+
+ if (WARN_ON(!dev->cfg))
+ return -ENODEV;
+
+ dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+ ((dev->fw_version >> 24) & 0xff),
+ ((dev->fw_version >> 16) & 0xff),
+ (dev->fw_version & 0xffff));
+
+ dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
+
+ err = dev->ops->dev_get_card_info(dev);
+ if (err) {
+ dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
+ return err;
+ }
+
+ if (dev->ops->dev_get_capabilities) {
+ err = dev->ops->dev_get_capabilities(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get capabilities, error %d\n", err);
+ kvaser_usb_remove_interfaces(dev);
+ return err;
+ }
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ err = kvaser_usb_init_one(dev, id, i);
+ if (err) {
+ kvaser_usb_remove_interfaces(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void kvaser_usb_disconnect(struct usb_interface *intf)
+{
+ struct kvaser_usb *dev = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (!dev)
+ return;
+
+ kvaser_usb_remove_interfaces(dev);
+}
+
+static struct usb_driver kvaser_usb_driver = {
+ .name = "kvaser_usb",
+ .probe = kvaser_usb_probe,
+ .disconnect = kvaser_usb_disconnect,
+ .id_table = kvaser_usb_table,
+};
+
+module_usb_driver(kvaser_usb_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
new file mode 100644
index 000000000000..c084bae5ec0a
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -0,0 +1,2028 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux mhydra driver (version 5.24)
+ * - CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * Known issues:
+ * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
+ * reported after a call to do_get_berr_counter(), since firmware does not
+ * distinguish between ERROR_WARNING and ERROR_ACTIVE.
+ * - Hardware timestamps are not set for CAN Tx frames.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Forward declarations */
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan;
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
+
+#define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82
+#define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02
+
+#define KVASER_USB_HYDRA_MAX_TRANSID 0xff
+#define KVASER_USB_HYDRA_MIN_TRANSID 0x01
+
+/* Minihydra command IDs */
+#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_CHIP_STATE_REQ 19
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_DRIVERMODE_REQ 21
+#define CMD_START_CHIP_REQ 26
+#define CMD_START_CHIP_RESP 27
+#define CMD_STOP_CHIP_REQ 28
+#define CMD_STOP_CHIP_RESP 29
+#define CMD_TX_CAN_MESSAGE 33
+#define CMD_GET_CARD_INFO_REQ 34
+#define CMD_GET_CARD_INFO_RESP 35
+#define CMD_GET_SOFTWARE_INFO_REQ 38
+#define CMD_GET_SOFTWARE_INFO_RESP 39
+#define CMD_ERROR_EVENT 45
+#define CMD_FLUSH_QUEUE 48
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_FLUSH_QUEUE_RESP 66
+#define CMD_SET_BUSPARAMS_FD_REQ 69
+#define CMD_SET_BUSPARAMS_FD_RESP 70
+#define CMD_SET_BUSPARAMS_RESP 85
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_RX_MESSAGE 106
+#define CMD_MAP_CHANNEL_REQ 200
+#define CMD_MAP_CHANNEL_RESP 201
+#define CMD_GET_SOFTWARE_DETAILS_REQ 202
+#define CMD_GET_SOFTWARE_DETAILS_RESP 203
+#define CMD_EXTENDED 255
+
+/* Minihydra extended command IDs */
+#define CMD_TX_CAN_MESSAGE_FD 224
+#define CMD_TX_ACKNOWLEDGE_FD 225
+#define CMD_RX_MESSAGE_FD 226
+
+/* Hydra commands are handled by different threads in firmware.
+ * The threads are denoted hydra entities (HEs). Each HE has a unique
+ * 6-bit address, which is used in hydra commands to address the source
+ * and destination HE. There are two predefined HE addresses; the
+ * remaining addresses differ between devices and firmware versions.
+ * Hence, we need to enumerate the addresses (see
+ * kvaser_usb_hydra_map_channel()).
+ */
+
+/* Well-known HE addresses */
+#define KVASER_USB_HYDRA_HE_ADDRESS_ROUTER 0x00
+#define KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL 0x3e
+
+#define KVASER_USB_HYDRA_TRANSID_CANHE 0x40
+#define KVASER_USB_HYDRA_TRANSID_SYSDBG 0x61
+
+struct kvaser_cmd_map_ch_req {
+ char name[16];
+ u8 channel;
+ u8 reserved[11];
+} __packed;
+
+struct kvaser_cmd_map_ch_res {
+ u8 he_addr;
+ u8 channel;
+ u8 reserved[26];
+} __packed;
+
+struct kvaser_cmd_card_info {
+ __le32 serial_number;
+ __le32 clock_res;
+ __le32 mfg_date;
+ __le32 ean[2];
+ u8 hw_version;
+ u8 usb_mode;
+ u8 hw_type;
+ u8 reserved0;
+ u8 nchannels;
+ u8 reserved1[3];
+} __packed;
+
+struct kvaser_cmd_sw_info {
+ u8 reserved0[8];
+ __le16 max_outstanding_tx;
+ u8 reserved1[18];
+} __packed;
+
+struct kvaser_cmd_sw_detail_req {
+ u8 use_ext_cmd;
+ u8 reserved[27];
+} __packed;
+
+/* Software detail flags */
+#define KVASER_USB_HYDRA_SW_FLAG_FW_BETA BIT(2)
+#define KVASER_USB_HYDRA_SW_FLAG_FW_BAD BIT(4)
+#define KVASER_USB_HYDRA_SW_FLAG_FREQ_80M BIT(5)
+#define KVASER_USB_HYDRA_SW_FLAG_EXT_CMD BIT(9)
+#define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10)
+#define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11)
+#define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12)
+struct kvaser_cmd_sw_detail_res {
+ __le32 sw_flags;
+ __le32 sw_version;
+ __le32 sw_name;
+ __le32 ean[2];
+ __le32 max_bitrate;
+ u8 reserved[4];
+} __packed;
+
+/* Sub commands for cap_req and cap_res */
+#define KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE 0x02
+#define KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT 0x05
+#define KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT 0x06
+struct kvaser_cmd_cap_req {
+ __le16 cap_cmd;
+ u8 reserved[26];
+} __packed;
+
+/* Status codes for cap_res */
+#define KVASER_USB_HYDRA_CAP_STAT_OK 0x00
+#define KVASER_USB_HYDRA_CAP_STAT_NOT_IMPL 0x01
+#define KVASER_USB_HYDRA_CAP_STAT_UNAVAIL 0x02
+struct kvaser_cmd_cap_res {
+ __le16 cap_cmd;
+ __le16 status;
+ __le32 mask;
+ __le32 value;
+ u8 reserved[16];
+} __packed;
+
+/* CMD_ERROR_EVENT error codes */
+#define KVASER_USB_HYDRA_ERROR_EVENT_CAN 0x01
+#define KVASER_USB_HYDRA_ERROR_EVENT_PARAM 0x09
+struct kvaser_cmd_error_event {
+ __le16 timestamp[3];
+ u8 reserved;
+ u8 error_code;
+ __le16 info1;
+ __le16 info2;
+} __packed;
+
+/* Chip state status flags. Used for chip_state_event and err_frame_data. */
+#define KVASER_USB_HYDRA_BUS_ERR_ACT 0x00
+#define KVASER_USB_HYDRA_BUS_ERR_PASS BIT(5)
+#define KVASER_USB_HYDRA_BUS_BUS_OFF BIT(6)
+struct kvaser_cmd_chip_state_event {
+ __le16 timestamp[3];
+ u8 tx_err_counter;
+ u8 rx_err_counter;
+ u8 bus_status;
+ u8 reserved[19];
+} __packed;
+
+/* Busparam modes */
+#define KVASER_USB_HYDRA_BUS_MODE_CAN 0x00
+#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
+#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
+struct kvaser_cmd_set_busparams {
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 nsamples;
+ u8 reserved0[4];
+ __le32 bitrate_d;
+ u8 tseg1_d;
+ u8 tseg2_d;
+ u8 sjw_d;
+ u8 nsamples_d;
+ u8 canfd_mode;
+ u8 reserved1[7];
+} __packed;
+
+/* Ctrl modes */
+#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
+#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
+struct kvaser_cmd_set_ctrlmode {
+ u8 mode;
+ u8 reserved[27];
+} __packed;
+
+struct kvaser_err_frame_data {
+ u8 bus_status;
+ u8 reserved0;
+ u8 tx_err_counter;
+ u8 rx_err_counter;
+ u8 reserved1[4];
+} __packed;
+
+struct kvaser_cmd_rx_can {
+ u8 cmd_len;
+ u8 cmd_no;
+ u8 channel;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 dlc;
+ u8 padding;
+ __le32 id;
+ union {
+ u8 data[8];
+ struct kvaser_err_frame_data err_frame_data;
+ };
+} __packed;
+
+/* Extended CAN ID flag. Used in rx_can and tx_can */
+#define KVASER_USB_HYDRA_EXTENDED_FRAME_ID BIT(31)
+struct kvaser_cmd_tx_can {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 transid;
+ u8 channel;
+ u8 reserved[11];
+} __packed;
+
+struct kvaser_cmd_header {
+ u8 cmd_no;
+ /* The destination HE address is stored in bits 0..5 of he_addr.
+ * The upper part of the source HE address is stored in bits 6..7 of
+ * he_addr, and the lower part in bits 12..15 of transid.
+ */
+ u8 he_addr;
+ __le16 transid;
+} __packed;
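+
+/* Illustrative bit layout (matching the helpers further down):
+ *
+ *   he_addr: [7:6] src HE (high bits) | [5:0] dest HE
+ *   transid: [15:12] src HE (low bits) | [11:0] transaction ID
+ */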
+
+struct kvaser_cmd {
+ struct kvaser_cmd_header header;
+ union {
+ struct kvaser_cmd_map_ch_req map_ch_req;
+ struct kvaser_cmd_map_ch_res map_ch_res;
+
+ struct kvaser_cmd_card_info card_info;
+ struct kvaser_cmd_sw_info sw_info;
+ struct kvaser_cmd_sw_detail_req sw_detail_req;
+ struct kvaser_cmd_sw_detail_res sw_detail_res;
+
+ struct kvaser_cmd_cap_req cap_req;
+ struct kvaser_cmd_cap_res cap_res;
+
+ struct kvaser_cmd_error_event error_event;
+
+ struct kvaser_cmd_set_busparams set_busparams_req;
+
+ struct kvaser_cmd_chip_state_event chip_state_event;
+
+ struct kvaser_cmd_set_ctrlmode set_ctrlmode;
+
+ struct kvaser_cmd_rx_can rx_can;
+ struct kvaser_cmd_tx_can tx_can;
+ } __packed;
+} __packed;
+
+/* CAN frame flags. Used in rx_can, ext_rx_can, tx_can and ext_tx_can */
+#define KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME BIT(0)
+#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
+#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
+#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
+/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
+#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
+#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
+#define KVASER_USB_HYDRA_CF_FLAG_FDF BIT(16)
+#define KVASER_USB_HYDRA_CF_FLAG_BRS BIT(17)
+#define KVASER_USB_HYDRA_CF_FLAG_ESI BIT(18)
+
+/* KCAN packet header macros. Used in ext_rx_can and ext_tx_can */
+#define KVASER_USB_KCAN_DATA_DLC_BITS 4
+#define KVASER_USB_KCAN_DATA_DLC_SHIFT 8
+#define KVASER_USB_KCAN_DATA_DLC_MASK \
+ GENMASK(KVASER_USB_KCAN_DATA_DLC_BITS - 1 + \
+ KVASER_USB_KCAN_DATA_DLC_SHIFT, \
+ KVASER_USB_KCAN_DATA_DLC_SHIFT)
+
+#define KVASER_USB_KCAN_DATA_BRS BIT(14)
+#define KVASER_USB_KCAN_DATA_FDF BIT(15)
+#define KVASER_USB_KCAN_DATA_OSM BIT(16)
+#define KVASER_USB_KCAN_DATA_AREQ BIT(31)
+#define KVASER_USB_KCAN_DATA_SRR BIT(31)
+#define KVASER_USB_KCAN_DATA_RTR BIT(29)
+#define KVASER_USB_KCAN_DATA_IDE BIT(30)
+struct kvaser_cmd_ext_rx_can {
+ __le32 flags;
+ __le32 id;
+ __le32 kcan_id;
+ __le32 kcan_header;
+ __le64 timestamp;
+ union {
+ u8 kcan_payload[64];
+ struct kvaser_err_frame_data err_frame_data;
+ };
+} __packed;
+
+struct kvaser_cmd_ext_tx_can {
+ __le32 flags;
+ __le32 id;
+ __le32 kcan_id;
+ __le32 kcan_header;
+ u8 databytes;
+ u8 dlc;
+ u8 reserved[6];
+ u8 kcan_payload[64];
+} __packed;
+
+struct kvaser_cmd_ext_tx_ack {
+ __le32 flags;
+ u8 reserved0[4];
+ __le64 timestamp;
+ u8 reserved1[8];
+} __packed;
+
+/* struct for extended commands (CMD_EXTENDED) */
+struct kvaser_cmd_ext {
+ struct kvaser_cmd_header header;
+ __le16 len;
+ u8 cmd_no_ext;
+ u8 reserved;
+
+ union {
+ struct kvaser_cmd_ext_rx_can rx_can;
+ struct kvaser_cmd_ext_tx_can tx_can;
+ struct kvaser_cmd_ext_tx_ack tx_ack;
+ } __packed;
+} __packed;
+
+static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
+ .name = "kvaser_usb_kcan",
+ .tseg1_min = 1,
+ .tseg1_max = 255,
+ .tseg2_min = 1,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 4096,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+ .name = "kvaser_usb_flex",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+#define KVASER_USB_HYDRA_TRANSID_BITS 12
+#define KVASER_USB_HYDRA_TRANSID_MASK \
+ GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0)
+#define KVASER_USB_HYDRA_HE_ADDR_SRC_MASK GENMASK(7, 6)
+#define KVASER_USB_HYDRA_HE_ADDR_DEST_MASK GENMASK(5, 0)
+#define KVASER_USB_HYDRA_HE_ADDR_SRC_BITS 2
+static inline u16 kvaser_usb_hydra_get_cmd_transid(const struct kvaser_cmd *cmd)
+{
+ return le16_to_cpu(cmd->header.transid) & KVASER_USB_HYDRA_TRANSID_MASK;
+}
+
+static inline void kvaser_usb_hydra_set_cmd_transid(struct kvaser_cmd *cmd,
+ u16 transid)
+{
+ cmd->header.transid =
+ cpu_to_le16(transid & KVASER_USB_HYDRA_TRANSID_MASK);
+}
+
+static inline u8 kvaser_usb_hydra_get_cmd_src_he(const struct kvaser_cmd *cmd)
+{
+ return (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) >>
+ KVASER_USB_HYDRA_HE_ADDR_SRC_BITS |
+ le16_to_cpu(cmd->header.transid) >>
+ KVASER_USB_HYDRA_TRANSID_BITS;
+}
+
+static inline void kvaser_usb_hydra_set_cmd_dest_he(struct kvaser_cmd *cmd,
+ u8 dest_he)
+{
+ cmd->header.he_addr =
+ (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) |
+ (dest_he & KVASER_USB_HYDRA_HE_ADDR_DEST_MASK);
+}
+
+static u8 kvaser_usb_hydra_channel_from_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ int i;
+ u8 channel = 0xff;
+ u8 src_he = kvaser_usb_hydra_get_cmd_src_he(cmd);
+
+ for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
+ if (dev->card_data.hydra.channel_to_he[i] == src_he) {
+ channel = i;
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static u16 kvaser_usb_hydra_get_next_transid(struct kvaser_usb *dev)
+{
+ unsigned long flags;
+ u16 transid;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ spin_lock_irqsave(&card_data->transid_lock, flags);
+ transid = card_data->transid;
+ if (transid >= KVASER_USB_HYDRA_MAX_TRANSID)
+ transid = KVASER_USB_HYDRA_MIN_TRANSID;
+ else
+ transid++;
+ card_data->transid = transid;
+ spin_unlock_irqrestore(&card_data->transid_lock, flags);
+
+ return transid;
+}
+
+static size_t kvaser_usb_hydra_cmd_size(struct kvaser_cmd *cmd)
+{
+ size_t ret;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED)
+ ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len);
+ else
+ ret = sizeof(struct kvaser_cmd);
+
+ return ret;
+}
+
+static struct kvaser_usb_net_priv *
+kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv = NULL;
+ u8 channel = kvaser_usb_hydra_channel_from_cmd(dev, cmd);
+
+ if (channel >= dev->nchannels)
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ else
+ priv = dev->nets[channel];
+
+ return priv;
+}
+
+static ktime_t
+kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
+{
+ u64 ticks;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
+
+ ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
+ } else {
+ ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
+ ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
+ ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ }
+
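+ /* Illustrative: assuming cfg->timestamp_freq is given in MHz
+ * (e.g. 80), one tick is 1/freq us, so ticks * 1000 / freq below
+ * yields nanoseconds.
+ */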
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+ u8 cmd_no, int channel)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
+ if (channel < 0) {
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+ } else {
+ if (channel >= KVASER_USB_MAX_NET_DEVICES) {
+ dev_err(&dev->intf->dev, "channel (%d) out of range.\n",
+ channel);
+ err = -EINVAL;
+ goto end;
+ }
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[channel]);
+ }
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int
+kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ u8 cmd_no)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = cmd_no;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd_async(priv, cmd,
+ kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ kfree(cmd);
+
+ return err;
+}
+
+/* This function is used for synchronously waiting on hydra control commands.
+ * Note: Compared to kvaser_usb_hydra_read_bulk_callback(), we never need to
+ * handle partial hydra commands, since hydra control commands are always
+ * non-extended commands. Any other commands received while waiting are
+ * silently discarded.
+ */
+static int kvaser_usb_hydra_wait_cmd(const struct kvaser_usb *dev, u8 cmd_no,
+ struct kvaser_cmd *cmd)
+{
+ void *buf;
+ int err;
+ unsigned long timeout = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ dev_err(&dev->intf->dev, "Wait for CMD_EXTENDED not allowed\n");
+ return -EINVAL;
+ }
+
+ buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ do {
+ int actual_len = 0;
+ int pos = 0;
+
+ err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
+ &actual_len);
+ if (err < 0)
+ goto end;
+
+ while (pos < actual_len) {
+ struct kvaser_cmd *tmp_cmd;
+ size_t cmd_len;
+
+ tmp_cmd = buf + pos;
+ cmd_len = kvaser_usb_hydra_cmd_size(tmp_cmd);
+ if (pos + cmd_len > actual_len) {
+ dev_err_ratelimited(&dev->intf->dev,
+ "Format error\n");
+ break;
+ }
+
+ if (tmp_cmd->header.cmd_no == cmd_no) {
+ memcpy(cmd, tmp_cmd, cmd_len);
+ goto end;
+ }
+ pos += cmd_len;
+ }
+ } while (time_before(jiffies, timeout));
+
+ err = -EINVAL;
+
+end:
+ kfree(buf);
+
+ return err;
+}
+
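+/* The response carries the transid of the original request. Since
+ * kvaser_usb_hydra_init_card() encodes the channel number in the low bits
+ * of the request transid, the channel can be recovered from it here.
+ */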
+static int kvaser_usb_hydra_map_channel_resp(struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u8 he, channel;
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ if (transid > 0x007f || transid < 0x0040) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_RESP, invalid transid: 0x%x\n",
+ transid);
+ return -EINVAL;
+ }
+
+ switch (transid) {
+ case KVASER_USB_HYDRA_TRANSID_CANHE:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 1:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 2:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 3:
+ case KVASER_USB_HYDRA_TRANSID_CANHE + 4:
+ channel = transid & 0x000f;
+ he = cmd->map_ch_res.he_addr;
+ card_data->channel_to_he[channel] = he;
+ break;
+ case KVASER_USB_HYDRA_TRANSID_SYSDBG:
+ card_data->sysdbg_he = cmd->map_ch_res.he_addr;
+ break;
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unknown CMD_MAP_CHANNEL_RESP transid=0x%x\n",
+ transid);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
+ u8 channel, const char *name)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ strcpy(cmd->map_ch_req.name, name);
+ cmd->header.cmd_no = CMD_MAP_CHANNEL_REQ;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ROUTER);
+ cmd->map_ch_req.channel = channel;
+
+ kvaser_usb_hydra_set_cmd_transid(cmd, transid);
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_MAP_CHANNEL_RESP, cmd);
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_map_channel_resp(dev, cmd);
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
+ u16 cap_cmd_req, u16 *status)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+ int err;
+ int i;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
+ cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+ if (err)
+ goto end;
+
+ *status = le16_to_cpu(cmd->cap_res.status);
+
+ if (*status != KVASER_USB_HYDRA_CAP_STAT_OK)
+ goto end;
+
+ cap_cmd_res = le16_to_cpu(cmd->cap_res.cap_cmd);
+ switch (cap_cmd_res) {
+ case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+ case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+ case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+ value = le32_to_cpu(cmd->cap_res.value);
+ mask = le32_to_cpu(cmd->cap_res.mask);
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+ cap_cmd_res);
+ break;
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (BIT(i) & (value & mask)) {
+ switch (cap_cmd_res) {
+ case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_LISTENONLY;
+ break;
+ case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+ card_data->capabilities |=
+ KVASER_USB_CAP_BERR_CAP;
+ break;
+ case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_ONE_SHOT;
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static void kvaser_usb_hydra_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
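+ /* A start reply that arrives after start_comp has already completed
+ * (e.g. after a restart) only needs to wake the tx queue; otherwise
+ * this is the reply kvaser_usb_hydra_start_chip() is waiting for.
+ */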
+ if (completion_done(&priv->start_comp) &&
+ netif_queue_stopped(priv->netdev)) {
+ netif_wake_queue(priv->netdev);
+ } else {
+ netif_start_queue(priv->netdev);
+ complete(&priv->start_comp);
+ }
+}
+
+static void kvaser_usb_hydra_stop_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ complete(&priv->flush_comp);
+}
+
+static void
+kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+ const struct can_berr_counter *bec,
+ enum can_state *new_state)
+{
+ if (bus_status & KVASER_USB_HYDRA_BUS_BUS_OFF) {
+ *new_state = CAN_STATE_BUS_OFF;
+ } else if (bus_status & KVASER_USB_HYDRA_BUS_ERR_PASS) {
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (bus_status == KVASER_USB_HYDRA_BUS_ERR_ACT) {
+ if (bec->txerr >= 128 || bec->rxerr >= 128) {
+ netdev_warn(priv->netdev,
+ "ERR_ACTIVE but err tx=%u or rx=%u >=128\n",
+ bec->txerr, bec->rxerr);
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (bec->txerr >= 96 || bec->rxerr >= 96) {
+ *new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ *new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+}
+
+static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+ const struct can_berr_counter *bec)
+{
+ struct net_device *netdev = priv->netdev;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ enum can_state new_state, old_state;
+
+ old_state = priv->can.state;
+
+ kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, bec,
+ &new_state);
+
+ if (new_state == old_state)
+ return;
+
+ /* Ignore state change if previous state was STOPPED and the new state
+ * is BUS_OFF. Firmware always reports this as BUS_OFF, since firmware
+ * does not distinguish between BUS_OFF and STOPPED.
+ */
+ if (old_state == CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF)
+ return;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (skb) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_hydra_send_simple_cmd_async
+ (priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
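+ /* Note that bus-off must be handled above even when the error skb
+ * allocation failed.
+ */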
+ if (!skb) {
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+
+ stats = &netdev->stats;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct can_berr_counter bec;
+ u8 bus_status;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ bus_status = cmd->chip_state_event.bus_status;
+ bec.txerr = cmd->chip_state_event.tx_err_counter;
+ bec.rxerr = cmd->chip_state_event.rx_err_counter;
+
+ kvaser_usb_hydra_update_state(priv, bus_status, &bec);
+ priv->bec.txerr = bec.txerr;
+ priv->bec.rxerr = bec.rxerr;
+}
+
+static void kvaser_usb_hydra_error_event_parameter(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* info1 will contain the offending cmd_no */
+ switch (le16_to_cpu(cmd->error_event.info1)) {
+ case CMD_START_CHIP_REQ:
+ dev_warn(&dev->intf->dev,
+ "CMD_START_CHIP_REQ error in parameter\n");
+ break;
+
+ case CMD_STOP_CHIP_REQ:
+ dev_warn(&dev->intf->dev,
+ "CMD_STOP_CHIP_REQ error in parameter\n");
+ break;
+
+ case CMD_FLUSH_QUEUE:
+ dev_warn(&dev->intf->dev,
+ "CMD_FLUSH_QUEUE error in parameter\n");
+ break;
+
+ case CMD_SET_BUSPARAMS_REQ:
+ dev_warn(&dev->intf->dev,
+ "Set bittiming failed. Error in parameter\n");
+ break;
+
+ case CMD_SET_BUSPARAMS_FD_REQ:
+ dev_warn(&dev->intf->dev,
+ "Set data bittiming failed. Error in parameter\n");
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled parameter error event cmd_no (%u)\n",
+ le16_to_cpu(cmd->error_event.info1));
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_error_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->error_event.error_code) {
+ case KVASER_USB_HYDRA_ERROR_EVENT_PARAM:
+ kvaser_usb_hydra_error_event_parameter(dev, cmd);
+ break;
+
+ case KVASER_USB_HYDRA_ERROR_EVENT_CAN:
+ /* Wrong channel mapping?! This should never happen!
+ * info1 will contain the offending cmd_no
+ */
+ dev_err(&dev->intf->dev,
+ "Received CAN error event for cmd_no (%u)\n",
+ le16_to_cpu(cmd->error_event.info1));
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled error event (%d)\n",
+ cmd->error_event.error_code);
+ break;
+ }
+}
+
+static void
+kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_err_frame_data *err_frame_data,
+ ktime_t hwtstamp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_berr_counter bec;
+ enum can_state new_state, old_state;
+ u8 bus_status;
+
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ bus_status = err_frame_data->bus_status;
+ bec.txerr = err_frame_data->tx_err_counter;
+ bec.rxerr = err_frame_data->rx_err_counter;
+
+ old_state = priv->can.state;
+ kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
+ &new_state);
+
+ skb = alloc_can_err_skb(netdev, &cf);
+
+ if (new_state != old_state) {
+ if (skb) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec.txerr >= bec.rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec.txerr <= bec.rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ can_change_state(netdev, cf, tx_state, rx_state);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_hydra_send_simple_cmd_async
+ (priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id |= CAN_ERR_BUSERROR;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ priv->bec.txerr = bec.txerr;
+ priv->bec.rxerr = bec.rxerr;
+}
+
+static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_cmd_ext *cmd)
+{
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 flags;
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_BUSERROR;
+ flags = le32_to_cpu(cmd->tx_ack.flags);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OSM_NACK)
+ cf->can_id |= CAN_ERR_ACK;
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ABL) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ priv->can.can_stats.arbitration_lost++;
+ }
+
+ stats->tx_errors++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_tx_urb_context *context;
+ struct kvaser_usb_net_priv *priv;
+ unsigned long irq_flags;
+ bool one_shot_fail = false;
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ if (cmd->header.cmd_no == CMD_EXTENDED) {
+ struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
+ u32 flags = le32_to_cpu(cmd_ext->tx_ack.flags);
+
+ if (flags & (KVASER_USB_HYDRA_CF_FLAG_OSM_NACK |
+ KVASER_USB_HYDRA_CF_FLAG_ABL)) {
+ kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
+ one_shot_fail = true;
+ }
+ }
+
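+ /* The transid of the request was the tx context's echo_index (see
+ * kvaser_usb_start_xmit()), so it maps straight back to the context.
+ */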
+ context = &priv->tx_contexts[transid % dev->max_tx_urbs];
+ if (!one_shot_fail) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_dlc2len(context->dlc);
+ }
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
+
+ can_get_echo_skb(priv->netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(priv->netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
+}
+
+static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv = NULL;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats;
+ u8 flags;
+ ktime_t hwtstamp;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ stats = &priv->netdev->stats;
+
+ flags = cmd->rx_can.flags;
+ hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
+ kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
+ hwtstamp);
+ return;
+ }
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id = le32_to_cpu(cmd->rx_can.id);
+
+ if (cf->can_id & KVASER_USB_HYDRA_EXTENDED_FRAME_ID) {
+ cf->can_id &= CAN_EFF_MASK;
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id &= CAN_SFF_MASK;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+
+ cf->can_dlc = get_can_dlc(cmd->rx_can.dlc);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, cmd->rx_can.data, cf->can_dlc);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
+ const struct kvaser_cmd_ext *cmd)
+{
+ struct kvaser_cmd *std_cmd = (struct kvaser_cmd *)cmd;
+ struct kvaser_usb_net_priv *priv;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats;
+ u32 flags;
+ u8 dlc;
+ u32 kcan_header;
+ ktime_t hwtstamp;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, std_cmd);
+ if (!priv)
+ return;
+
+ stats = &priv->netdev->stats;
+
+ kcan_header = le32_to_cpu(cmd->rx_can.kcan_header);
+ dlc = (kcan_header & KVASER_USB_KCAN_DATA_DLC_MASK) >>
+ KVASER_USB_KCAN_DATA_DLC_SHIFT;
+
+ flags = le32_to_cpu(cmd->rx_can.flags);
+ hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
+ kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
+ hwtstamp);
+ return;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF)
+ skb = alloc_canfd_skb(priv->netdev, &cf);
+ else
+ skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf);
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = hwtstamp;
+
+ cf->can_id = le32_to_cpu(cmd->rx_can.id);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID) {
+ cf->can_id &= CAN_EFF_MASK;
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id &= CAN_SFF_MASK;
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) {
+ cf->len = can_dlc2len(get_canfd_dlc(dlc));
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS)
+ cf->flags |= CANFD_BRS;
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI)
+ cf->flags |= CANFD_ESI;
+ } else {
+ cf->len = get_can_dlc(dlc);
+ }
+
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->header.cmd_no) {
+ case CMD_START_CHIP_RESP:
+ kvaser_usb_hydra_start_chip_reply(dev, cmd);
+ break;
+
+ case CMD_STOP_CHIP_RESP:
+ kvaser_usb_hydra_stop_chip_reply(dev, cmd);
+ break;
+
+ case CMD_FLUSH_QUEUE_RESP:
+ kvaser_usb_hydra_flush_queue_reply(dev, cmd);
+ break;
+
+ case CMD_CHIP_STATE_EVENT:
+ kvaser_usb_hydra_state_event(dev, cmd);
+ break;
+
+ case CMD_ERROR_EVENT:
+ kvaser_usb_hydra_error_event(dev, cmd);
+ break;
+
+ case CMD_TX_ACKNOWLEDGE:
+ kvaser_usb_hydra_tx_acknowledge(dev, cmd);
+ break;
+
+ case CMD_RX_MESSAGE:
+ kvaser_usb_hydra_rx_msg_std(dev, cmd);
+ break;
+
+ /* Ignored commands */
+ case CMD_SET_BUSPARAMS_RESP:
+ case CMD_SET_BUSPARAMS_FD_RESP:
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev, "Unhandled command (%d)\n",
+ cmd->header.cmd_no);
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_handle_cmd_ext(const struct kvaser_usb *dev,
+ const struct kvaser_cmd_ext *cmd)
+{
+ switch (cmd->cmd_no_ext) {
+ case CMD_TX_ACKNOWLEDGE_FD:
+ kvaser_usb_hydra_tx_acknowledge(dev, (struct kvaser_cmd *)cmd);
+ break;
+
+ case CMD_RX_MESSAGE_FD:
+ kvaser_usb_hydra_rx_msg_ext(dev, cmd);
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev, "Unhandled extended command (%d)\n",
+ cmd->header.cmd_no);
+ break;
+ }
+}
+
+static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ if (cmd->header.cmd_no == CMD_EXTENDED)
+ kvaser_usb_hydra_handle_cmd_ext
+ (dev, (struct kvaser_cmd_ext *)cmd);
+ else
+ kvaser_usb_hydra_handle_cmd_std(dev, cmd);
+}
+
+static void *
+kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd_ext *cmd;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 dlc = can_len2dlc(cf->len);
+ u8 nbr_of_bytes = cf->len;
+ u32 flags;
+ u32 id;
+ u32 kcan_id;
+ u32 kcan_header;
+
+ *frame_len = nbr_of_bytes;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ ((struct kvaser_cmd *)cmd,
+ dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid((struct kvaser_cmd *)cmd, transid);
+
+ cmd->header.cmd_no = CMD_EXTENDED;
+ cmd->cmd_no_ext = CMD_TX_CAN_MESSAGE_FD;
+
+ *cmd_len = ALIGN(sizeof(struct kvaser_cmd_ext) -
+ sizeof(cmd->tx_can.kcan_payload) + nbr_of_bytes,
+ 8);
+
+ cmd->len = cpu_to_le16(*cmd_len);
+
+ cmd->tx_can.databytes = nbr_of_bytes;
+ cmd->tx_can.dlc = dlc;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = cf->can_id & CAN_EFF_MASK;
+ flags = KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID;
+ kcan_id = (cf->can_id & CAN_EFF_MASK) |
+ KVASER_USB_KCAN_DATA_IDE | KVASER_USB_KCAN_DATA_SRR;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ flags = 0;
+ kcan_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_ERR_FLAG)
+ flags |= KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
+
+ kcan_header = ((dlc << KVASER_USB_KCAN_DATA_DLC_SHIFT) &
+ KVASER_USB_KCAN_DATA_DLC_MASK) |
+ KVASER_USB_KCAN_DATA_AREQ |
+ (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
+ KVASER_USB_KCAN_DATA_OSM : 0);
+
+ if (can_is_canfd_skb(skb)) {
+ kcan_header |= KVASER_USB_KCAN_DATA_FDF |
+ (cf->flags & CANFD_BRS ?
+ KVASER_USB_KCAN_DATA_BRS : 0);
+ } else {
+ if (cf->can_id & CAN_RTR_FLAG) {
+ kcan_id |= KVASER_USB_KCAN_DATA_RTR;
+ cmd->tx_can.databytes = 0;
+ flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
+ }
+ }
+
+ cmd->tx_can.kcan_id = cpu_to_le32(kcan_id);
+ cmd->tx_can.id = cpu_to_le32(id);
+ cmd->tx_can.flags = cpu_to_le32(flags);
+ cmd->tx_can.kcan_header = cpu_to_le32(kcan_header);
+
+ memcpy(cmd->tx_can.kcan_payload, cf->data, nbr_of_bytes);
+
+ return cmd;
+}
+
+static void *
+kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 flags;
+ u32 id;
+
+ *frame_len = cf->can_dlc;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ if (!cmd)
+ return NULL;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid(cmd, transid);
+
+ cmd->header.cmd_no = CMD_TX_CAN_MESSAGE;
+
+ *cmd_len = ALIGN(sizeof(struct kvaser_cmd), 8);
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = (cf->can_id & CAN_EFF_MASK);
+ id |= KVASER_USB_HYDRA_EXTENDED_FRAME_ID;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ cmd->tx_can.dlc = cf->can_dlc;
+
+ flags = (cf->can_id & CAN_EFF_FLAG ?
+ KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME;
+
+ flags |= (cf->can_id & CAN_ERR_FLAG ?
+ KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME : 0);
+
+ cmd->tx_can.id = cpu_to_le32(id);
+ cmd->tx_can.flags = flags;
+
+ memcpy(cmd->tx_can.data, cf->data, *frame_len);
+
+ return cmd;
+}
+
+static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+{
+ int err = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* CAN controller automatically recovers from BUS_OFF */
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ int tseg1 = bt->prop_seg + bt->phase_seg1;
+ int tseg2 = bt->phase_seg2;
+ int sjw = bt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
+ cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
+ cmd->set_busparams_req.sjw = (u8)sjw;
+ cmd->set_busparams_req.tseg1 = (u8)tseg1;
+ cmd->set_busparams_req.tseg2 = (u8)tseg2;
+ cmd->set_busparams_req.nsamples = 1;
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+{
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+ int tseg2 = dbt->phase_seg2;
+ int sjw = dbt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
+ cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
+ cmd->set_busparams_req.sjw_d = (u8)sjw;
+ cmd->set_busparams_req.tseg1_d = (u8)tseg1;
+ cmd->set_busparams_req.tseg2_d = (u8)tseg2;
+ cmd->set_busparams_req.nsamples_d = 1;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ cmd->set_busparams_req.canfd_mode =
+ KVASER_USB_HYDRA_BUS_MODE_NONISO;
+ else
+ cmd->set_busparams_req.canfd_mode =
+ KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO;
+ }
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev,
+ CMD_GET_CHIP_STATE_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
+{
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *ep;
+ int i;
+
+ iface_desc = &dev->intf->altsetting[0];
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ ep = &iface_desc->endpoint[i].desc;
+
+ if (!dev->bulk_in && usb_endpoint_is_bulk_in(ep) &&
+ ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_IN_ADDR)
+ dev->bulk_in = ep;
+
+ if (!dev->bulk_out && usb_endpoint_is_bulk_out(ep) &&
+ ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_OUT_ADDR)
+ dev->bulk_out = ep;
+
+ if (dev->bulk_in && dev->bulk_out)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
+{
+ int err;
+ unsigned int i;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+
+ card_data->transid = KVASER_USB_HYDRA_MIN_TRANSID;
+ spin_lock_init(&card_data->transid_lock);
+
+ memset(card_data->usb_rx_leftover, 0, KVASER_USB_HYDRA_MAX_CMD_LEN);
+ card_data->usb_rx_leftover_len = 0;
+ spin_lock_init(&card_data->usb_rx_leftover_lock);
+
+ memset(card_data->channel_to_he, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL,
+ sizeof(card_data->channel_to_he));
+ card_data->sysdbg_he = 0;
+
+ for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) {
+ err = kvaser_usb_hydra_map_channel
+ (dev,
+ (KVASER_USB_HYDRA_TRANSID_CANHE | i),
+ i, "CAN");
+ if (err) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_REQ failed for CAN%u\n", i);
+ return err;
+ }
+ }
+
+ err = kvaser_usb_hydra_map_channel(dev, KVASER_USB_HYDRA_TRANSID_SYSDBG,
+ 0, "SYSDBG");
+ if (err) {
+ dev_err(&dev->intf->dev,
+ "CMD_MAP_CHANNEL_REQ failed for SYSDBG\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO_REQ,
+ -1);
+ if (err)
+ return err;
+
+ memset(&cmd, 0, sizeof(struct kvaser_cmd));
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_RESP, &cmd);
+ if (err)
+ return err;
+
+ dev->max_tx_urbs = min_t(unsigned int, KVASER_USB_MAX_TX_URBS,
+ le16_to_cpu(cmd.sw_info.max_outstanding_tx));
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+ u32 flags;
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
+ cmd->sw_detail_req.use_ext_cmd = 1;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ if (err)
+ goto end;
+
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_DETAILS_RESP,
+ cmd);
+ if (err)
+ goto end;
+
+ dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
+ dev_err(&dev->intf->dev,
+ "Bad firmware, device refuse to run!\n");
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BETA)
+ dev_info(&dev->intf->dev, "Beta firmware in use\n");
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CAP)
+ card_data->capabilities |= KVASER_USB_CAP_EXT_CAP;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CMD)
+ card_data->capabilities |= KVASER_USB_HYDRA_CAP_EXT_CMD;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_CANFD)
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_FD;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_NONISO)
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+
+ if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M)
+ dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan;
+ else
+ dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc;
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_CARD_INFO_REQ, -1);
+ if (err)
+ return err;
+
+ memset(&cmd, 0, sizeof(struct kvaser_cmd));
+ err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
+ if (err)
+ return err;
+
+ dev->nchannels = cmd.card_info.nchannels;
+ if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
+{
+ int err;
+ u16 status;
+
+ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+ dev_info(&dev->intf->dev,
+ "No extended capability support. Upgrade your device.\n");
+ return 0;
+ }
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev,
+ KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE failed %u\n",
+ status);
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev,
+ KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT failed %u\n",
+ status);
+
+ err = kvaser_usb_hydra_get_single_capability
+ (dev, KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT failed %u\n",
+ status);
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int err;
+
+ if ((priv->can.ctrlmode &
+ (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) ==
+ CAN_CTRLMODE_FD_NON_ISO) {
+ netdev_warn(priv->netdev,
+ "CTRLMODE_FD shall be on if CTRLMODE_FD_NON_ISO is on\n");
+ return -EINVAL;
+ }
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_LISTEN;
+ else
+ cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
+
+ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->start_comp);
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->start_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->stop_comp);
+
+ /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
+ * see comment in kvaser_usb_hydra_update_state()
+ */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_STOP_CHIP_REQ,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->stop_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->flush_comp);
+
+ err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->flush_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* A single extended hydra command can be transmitted across multiple USB
+ * transfers. We have to buffer partial hydra commands and handle them on the
+ * next callback.
+ */
+static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
+ void *buf, int len)
+{
+ unsigned long irq_flags;
+ struct kvaser_cmd *cmd;
+ int pos = 0;
+ size_t cmd_len;
+ struct kvaser_usb_dev_card_data_hydra *card_data =
+ &dev->card_data.hydra;
+ int usb_rx_leftover_len;
+ spinlock_t *usb_rx_leftover_lock = &card_data->usb_rx_leftover_lock;
+
+ spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
+ usb_rx_leftover_len = card_data->usb_rx_leftover_len;
+ if (usb_rx_leftover_len) {
+ int remaining_bytes;
+
+ cmd = (struct kvaser_cmd *)card_data->usb_rx_leftover;
+
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+
+ remaining_bytes = min_t(unsigned int, len,
+ cmd_len - usb_rx_leftover_len);
+ /* Make sure we do not overflow usb_rx_leftover */
+ if (remaining_bytes + usb_rx_leftover_len >
+ KVASER_USB_HYDRA_MAX_CMD_LEN) {
+ dev_err(&dev->intf->dev, "Format error\n");
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+ return;
+ }
+
+ memcpy(card_data->usb_rx_leftover + usb_rx_leftover_len, buf,
+ remaining_bytes);
+ pos += remaining_bytes;
+
+ if (remaining_bytes + usb_rx_leftover_len == cmd_len) {
+ kvaser_usb_hydra_handle_cmd(dev, cmd);
+ usb_rx_leftover_len = 0;
+ } else {
+ /* Command still not complete */
+ usb_rx_leftover_len += remaining_bytes;
+ }
+ card_data->usb_rx_leftover_len = usb_rx_leftover_len;
+ }
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+
+ while (pos < len) {
+ cmd = buf + pos;
+
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+
+ if (pos + cmd_len > len) {
+ /* We got first part of a command */
+ int leftover_bytes;
+
+ leftover_bytes = len - pos;
+ /* Make sure we do not overflow usb_rx_leftover */
+ if (leftover_bytes > KVASER_USB_HYDRA_MAX_CMD_LEN) {
+ dev_err(&dev->intf->dev, "Format error\n");
+ return;
+ }
+ spin_lock_irqsave(usb_rx_leftover_lock, irq_flags);
+ memcpy(card_data->usb_rx_leftover, buf + pos,
+ leftover_bytes);
+ card_data->usb_rx_leftover_len = leftover_bytes;
+ spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags);
+ break;
+ }
+
+ kvaser_usb_hydra_handle_cmd(dev, cmd);
+ pos += cmd_len;
+ }
+}
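
For illustration, the reassembly scheme above can be modeled in isolation. The following user-space C sketch mirrors the leftover-buffer technique of kvaser_usb_hydra_read_bulk_callback(); the fixed buffer size and the cmd_size() helper are assumptions of the sketch, not part of the driver.

/* Hedged illustration only: a user-space model of the leftover-buffer
 * reassembly. MAX_CMD_LEN and cmd_size() are assumptions of the sketch.
 */
#include <stdio.h>
#include <string.h>

#define MAX_CMD_LEN 32

static unsigned char leftover[MAX_CMD_LEN];
static size_t leftover_len;

/* in this model, the first byte of a command encodes its total length */
static size_t cmd_size(const unsigned char *cmd) { return cmd[0]; }

static void handle_cmd(const unsigned char *cmd)
{
	printf("complete command, len=%zu\n", cmd_size(cmd));
}

static void rx_transfer(const unsigned char *buf, size_t len)
{
	size_t pos = 0;

	if (leftover_len) {
		size_t need = cmd_size(leftover) - leftover_len;
		size_t take = need < len ? need : len;

		memcpy(leftover + leftover_len, buf, take);
		leftover_len += take;
		pos += take;
		if (leftover_len == cmd_size(leftover)) {
			handle_cmd(leftover);
			leftover_len = 0;
		}
	}

	while (pos < len) {
		size_t clen = cmd_size(buf + pos);

		if (pos + clen > len) {	/* we got the first part only */
			leftover_len = len - pos;
			memcpy(leftover, buf + pos, leftover_len);
			break;
		}
		handle_cmd(buf + pos);
		pos += clen;
	}
}

int main(void)
{
	/* one 6-byte command split across two transfers */
	unsigned char part1[] = { 6, 'a', 'b', 'c' };
	unsigned char part2[] = { 'd', 'e' };

	rx_transfer(part1, sizeof(part1));
	rx_transfer(part2, sizeof(part2));
	return 0;
}
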
+
+static void *
+kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ void *buf;
+
+ if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
+ buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
+ cmd_len, transid);
+ else
+ buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
+ cmd_len, transid);
+
+ return buf;
+}
+
+const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
+ .dev_set_mode = kvaser_usb_hydra_set_mode,
+ .dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
+ .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
+ .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
+ .dev_init_card = kvaser_usb_hydra_init_card,
+ .dev_get_software_info = kvaser_usb_hydra_get_software_info,
+ .dev_get_software_details = kvaser_usb_hydra_get_software_details,
+ .dev_get_card_info = kvaser_usb_hydra_get_card_info,
+ .dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
+ .dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
+ .dev_start_chip = kvaser_usb_hydra_start_chip,
+ .dev_stop_chip = kvaser_usb_hydra_stop_chip,
+ .dev_reset_chip = NULL,
+ .dev_flush_queue = kvaser_usb_hydra_flush_queue,
+ .dev_read_bulk_callback = kvaser_usb_hydra_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_hydra_frame_to_cmd,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
+ .clock = {
+ .freq = 80000000,
+ },
+ .timestamp_freq = 80,
+ .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
+ .data_bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
+ .clock = {
+ .freq = 24000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+};
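
For context on the two device configurations above: in classical CAN, the nominal bitrate follows from the controller clock, the bit-rate prescaler (brp) and the time segments. A minimal sketch, using example values rather than values taken from the driver:

/* Illustration only: bitrate = freq / (brp * (1 + tseg1 + tseg2)),
 * the standard CAN bit-timing relation. Values below are examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned int freq = 80000000;	/* the kcan clock above */
	unsigned int brp = 8, tseg1 = 7, tseg2 = 2;
	unsigned int bitrate = freq / (brp * (1 + tseg1 + tseg2));

	printf("%u bit/s\n", bitrate);	/* 1000000 */
	return 0;
}
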
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
new file mode 100644
index 000000000000..07d2f3aa2c02
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -0,0 +1,1358 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ * - Kvaser linux usbcanII driver (version 5.3)
+ *
+ * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ * Copyright (C) 2015 Valeo S.A.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Forward declaration */
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
+
+#define CAN_USB_CLOCK 8000000
+#define MAX_USBCAN_NET_DEVICES 2
+
+/* Command header size */
+#define CMD_HEADER_LEN 2
+
+/* Kvaser CAN message flags */
+#define MSG_FLAG_ERROR_FRAME BIT(0)
+#define MSG_FLAG_OVERRUN BIT(1)
+#define MSG_FLAG_NERR BIT(2)
+#define MSG_FLAG_WAKEUP BIT(3)
+#define MSG_FLAG_REMOTE_FRAME BIT(4)
+#define MSG_FLAG_RESERVED BIT(5)
+#define MSG_FLAG_TX_ACK BIT(6)
+#define MSG_FLAG_TX_REQUEST BIT(7)
+
+/* CAN states (M16C CxSTRH register) */
+#define M16C_STATE_BUS_RESET BIT(0)
+#define M16C_STATE_BUS_ERROR BIT(4)
+#define M16C_STATE_BUS_PASSIVE BIT(5)
+#define M16C_STATE_BUS_OFF BIT(6)
+
+/* Leaf/usbcan command ids */
+#define CMD_RX_STD_MESSAGE 12
+#define CMD_TX_STD_MESSAGE 13
+#define CMD_RX_EXT_MESSAGE 14
+#define CMD_TX_EXT_MESSAGE 15
+#define CMD_SET_BUS_PARAMS 16
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_CTRL_MODE 21
+#define CMD_RESET_CHIP 24
+#define CMD_START_CHIP 26
+#define CMD_START_CHIP_REPLY 27
+#define CMD_STOP_CHIP 28
+#define CMD_STOP_CHIP_REPLY 29
+
+#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
+
+#define CMD_GET_CARD_INFO 34
+#define CMD_GET_CARD_INFO_REPLY 35
+#define CMD_GET_SOFTWARE_INFO 38
+#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_FLUSH_QUEUE 48
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68
+
+#define CMD_LEAF_LOG_MESSAGE 106
+
+/* error factors */
+#define M16C_EF_ACKE BIT(0)
+#define M16C_EF_CRCE BIT(1)
+#define M16C_EF_FORME BIT(2)
+#define M16C_EF_STFE BIT(3)
+#define M16C_EF_BITE0 BIT(4)
+#define M16C_EF_BITE1 BIT(5)
+#define M16C_EF_RCVE BIT(6)
+#define M16C_EF_TRE BIT(7)
+
+/* Only Leaf-based devices can report M16C error factors,
+ * thus define our own error status flags for USBCANII
+ */
+#define USBCAN_ERROR_STATE_NONE 0
+#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
+#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
+#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
+
+/* bittiming parameters */
+#define KVASER_USB_TSEG1_MIN 1
+#define KVASER_USB_TSEG1_MAX 16
+#define KVASER_USB_TSEG2_MIN 1
+#define KVASER_USB_TSEG2_MAX 8
+#define KVASER_USB_SJW_MAX 4
+#define KVASER_USB_BRP_MIN 1
+#define KVASER_USB_BRP_MAX 64
+#define KVASER_USB_BRP_INC 1
+
+/* ctrl modes */
+#define KVASER_CTRL_MODE_NORMAL 1
+#define KVASER_CTRL_MODE_SILENT 2
+#define KVASER_CTRL_MODE_SELFRECEPTION 3
+#define KVASER_CTRL_MODE_OFF 4
+
+/* Extended CAN identifier flag */
+#define KVASER_EXTENDED_FRAME BIT(31)
+
+struct kvaser_cmd_simple {
+ u8 tid;
+ u8 channel;
+} __packed;
+
+struct kvaser_cmd_cardinfo {
+ u8 tid;
+ u8 nchannels;
+ __le32 serial_number;
+ __le32 padding0;
+ __le32 clock_resolution;
+ __le32 mfgdate;
+ u8 ean[8];
+ u8 hw_revision;
+ union {
+ struct {
+ u8 usb_hs_mode;
+ } __packed leaf1;
+ struct {
+ u8 padding;
+ } __packed usbcan1;
+ } __packed;
+ __le16 padding1;
+} __packed;
+
+struct leaf_cmd_softinfo {
+ u8 tid;
+ u8 padding0;
+ __le32 sw_options;
+ __le32 fw_version;
+ __le16 max_outstanding_tx;
+ __le16 padding1[9];
+} __packed;
+
+struct usbcan_cmd_softinfo {
+ u8 tid;
+ u8 fw_name[5];
+ __le16 max_outstanding_tx;
+ u8 padding[6];
+ __le32 fw_version;
+ __le16 checksum;
+ __le16 sw_options;
+} __packed;
+
+struct kvaser_cmd_busparams {
+ u8 tid;
+ u8 channel;
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 no_samp;
+} __packed;
+
+struct kvaser_cmd_tx_can {
+ u8 channel;
+ u8 tid;
+ u8 data[14];
+ union {
+ struct {
+ u8 padding;
+ u8 flags;
+ } __packed leaf;
+ struct {
+ u8 flags;
+ u8 padding;
+ } __packed usbcan;
+ } __packed;
+} __packed;
+
+struct kvaser_cmd_rx_can_header {
+ u8 channel;
+ u8 flag;
+} __packed;
+
+struct leaf_cmd_rx_can {
+ u8 channel;
+ u8 flag;
+
+ __le16 time[3];
+ u8 data[14];
+} __packed;
+
+struct usbcan_cmd_rx_can {
+ u8 channel;
+ u8 flag;
+
+ u8 data[14];
+ __le16 time;
+} __packed;
+
+struct leaf_cmd_chip_state_event {
+ u8 tid;
+ u8 channel;
+
+ __le16 time[3];
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct usbcan_cmd_chip_state_event {
+ u8 tid;
+ u8 channel;
+
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ __le16 time;
+
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_cmd_tx_acknowledge_header {
+ u8 channel;
+ u8 tid;
+} __packed;
+
+struct leaf_cmd_error_event {
+ u8 tid;
+ u8 flags;
+ __le16 time[3];
+ u8 channel;
+ u8 padding;
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ u8 status;
+ u8 error_factor;
+} __packed;
+
+struct usbcan_cmd_error_event {
+ u8 tid;
+ u8 padding;
+ u8 tx_errors_count_ch0;
+ u8 rx_errors_count_ch0;
+ u8 tx_errors_count_ch1;
+ u8 rx_errors_count_ch1;
+ u8 status_ch0;
+ u8 status_ch1;
+ __le16 time;
+} __packed;
+
+struct kvaser_cmd_ctrl_mode {
+ u8 tid;
+ u8 channel;
+ u8 ctrl_mode;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_cmd_flush_queue {
+ u8 tid;
+ u8 channel;
+ u8 flags;
+ u8 padding[3];
+} __packed;
+
+struct leaf_cmd_log_message {
+ u8 channel;
+ u8 flags;
+ __le16 time[3];
+ u8 dlc;
+ u8 time_offset;
+ __le32 id;
+ u8 data[8];
+} __packed;
+
+struct kvaser_cmd {
+ u8 len;
+ u8 id;
+ union {
+ struct kvaser_cmd_simple simple;
+ struct kvaser_cmd_cardinfo cardinfo;
+ struct kvaser_cmd_busparams busparams;
+
+ struct kvaser_cmd_rx_can_header rx_can_header;
+ struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
+
+ union {
+ struct leaf_cmd_softinfo softinfo;
+ struct leaf_cmd_rx_can rx_can;
+ struct leaf_cmd_chip_state_event chip_state_event;
+ struct leaf_cmd_error_event error_event;
+ struct leaf_cmd_log_message log_message;
+ } __packed leaf;
+
+ union {
+ struct usbcan_cmd_softinfo softinfo;
+ struct usbcan_cmd_rx_can rx_can;
+ struct usbcan_cmd_chip_state_event chip_state_event;
+ struct usbcan_cmd_error_event error_event;
+ } __packed usbcan;
+
+ struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_ctrl_mode ctrl_mode;
+ struct kvaser_cmd_flush_queue flush_queue;
+ } u;
+} __packed;
+
+/* Summary of a kvaser error event, for unified Leaf/Usbcan error
+ * handling. Some discrepancies between the two families exist:
+ *
+ * - USBCAN firmware does not report M16C "error factors"
+ * - USBCAN controllers have difficulties reporting whether a raised error
+ *   event belongs to ch0 or ch1. They leave such arbitration to the OS
+ *   driver by letting it compare error counters with previous values
+ *   and decide the error event's channel. Thus for USBCAN, the channel
+ *   field is only advisory.
+ */
+struct kvaser_usb_err_summary {
+ u8 channel, status, txerr, rxerr;
+ union {
+ struct {
+ u8 error_factor;
+ } leaf;
+ struct {
+ u8 other_ch_status;
+ u8 error_state;
+ } usbcan;
+ };
+};
+
+static void *
+kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct sk_buff *skb, int *frame_len,
+ int *cmd_len, u16 transid)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ u8 *cmd_tx_can_flags = NULL; /* GCC */
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ *frame_len = cf->can_dlc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (cmd) {
+ cmd->u.tx_can.tid = transid & 0xff;
+ cmd->len = *cmd_len = CMD_HEADER_LEN +
+ sizeof(struct kvaser_cmd_tx_can);
+ cmd->u.tx_can.channel = priv->channel;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
+ break;
+ case KVASER_USBCAN:
+ cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags;
+ break;
+ }
+
+ *cmd_tx_can_flags = 0;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ cmd->id = CMD_TX_EXT_MESSAGE;
+ cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f;
+ cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f;
+ cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f;
+ cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff;
+ cmd->u.tx_can.data[4] = cf->can_id & 0x3f;
+ } else {
+ cmd->id = CMD_TX_STD_MESSAGE;
+ cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f;
+ cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
+ }
+
+ cmd->u.tx_can.data[5] = cf->can_dlc;
+ memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
+ }
+ return cmd;
+}
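
The identifier packing above spreads a CAN ID over data[0..4]. Below is a self-contained sketch of the extended (29-bit) case with a pack/unpack round trip; the helper names are hypothetical, not the driver's:

/* Illustration of the extended-ID byte layout used in
 * kvaser_usb_leaf_frame_to_cmd(); pack/unpack are hypothetical helpers.
 */
#include <assert.h>
#include <stdint.h>

static void pack_ext_id(uint32_t id, uint8_t d[5])
{
	d[0] = (id >> 24) & 0x1f;	/* bits 28..24 */
	d[1] = (id >> 18) & 0x3f;	/* bits 23..18 */
	d[2] = (id >> 14) & 0x0f;	/* bits 17..14 */
	d[3] = (id >> 6) & 0xff;	/* bits 13..6 */
	d[4] = id & 0x3f;		/* bits 5..0 */
}

static uint32_t unpack_ext_id(const uint8_t d[5])
{
	return ((uint32_t)d[0] << 24) | ((uint32_t)d[1] << 18) |
	       ((uint32_t)d[2] << 14) | ((uint32_t)d[3] << 6) | d[4];
}

int main(void)
{
	uint8_t d[5];
	uint32_t id = 0x1abcdef5 & 0x1fffffff;	/* a 29-bit ID */

	pack_ext_id(id, d);
	assert(unpack_ext_id(d) == id);		/* round trip holds */
	return 0;
}
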
+
+static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
+ struct kvaser_cmd *cmd)
+{
+ struct kvaser_cmd *tmp;
+ void *buf;
+ int actual_len;
+ int err;
+ int pos;
+ unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT);
+
+ buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ do {
+ err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE,
+ &actual_len);
+ if (err < 0)
+ goto end;
+
+ pos = 0;
+ while (pos <= actual_len - CMD_HEADER_LEN) {
+ tmp = buf + pos;
+
+ /* Handle commands crossing the USB endpoint max packet
+ * size boundary. Check kvaser_usb_read_bulk_callback()
+ * for further details.
+ */
+ if (tmp->len == 0) {
+ pos = round_up(pos,
+ le16_to_cpu
+ (dev->bulk_in->wMaxPacketSize));
+ continue;
+ }
+
+ if (pos + tmp->len > actual_len) {
+ dev_err_ratelimited(&dev->intf->dev,
+ "Format error\n");
+ break;
+ }
+
+ if (tmp->id == id) {
+ memcpy(cmd, tmp, tmp->len);
+ goto end;
+ }
+
+ pos += tmp->len;
+ }
+ } while (time_before(jiffies, to));
+
+ err = -EINVAL;
+
+end:
+ kfree(buf);
+
+ return err;
+}
+
+static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
+ u8 cmd_id, int channel)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = cmd_id;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
+ cmd->u.simple.channel = channel;
+ cmd->u.simple.tid = 0xff;
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd);
+ if (err)
+ return err;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+ break;
+ case KVASER_USBCAN:
+ dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev)
+{
+ int err;
+ int retry = 3;
+
+ /* On some x86 laptops, plugging a Kvaser device again after
+ * an unplug makes the firmware always ignore the very first
+ * command. For such a case, provide some room for retries
+ * instead of completely exiting the driver.
+ */
+ do {
+ err = kvaser_usb_leaf_get_software_info_inner(dev);
+ } while (--retry && err == -ETIMEDOUT);
+
+ return err;
+}
+
+static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
+{
+ struct kvaser_cmd cmd;
+ int err;
+
+ err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd);
+ if (err)
+ return err;
+
+ dev->nchannels = cmd.u.cardinfo.nchannels;
+ if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
+ (dev->card_data.leaf.family == KVASER_USBCAN &&
+ dev->nchannels > MAX_USBCAN_NET_DEVICES))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct net_device_stats *stats;
+ struct kvaser_usb_tx_urb_context *context;
+ struct kvaser_usb_net_priv *priv;
+ unsigned long flags;
+ u8 channel, tid;
+
+ channel = cmd->u.tx_acknowledge_header.channel;
+ tid = cmd->u.tx_acknowledge_header.tid;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ stats = &priv->netdev->stats;
+
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs];
+
+ /* Sometimes the state change doesn't come after a bus-off event */
+ if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ } else {
+ netdev_err(priv->netdev,
+ "No memory left for err_skb\n");
+ }
+
+ priv->can.can_stats.restarts++;
+ netif_carrier_on(priv->netdev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += context->dlc;
+
+ spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+
+ can_get_echo_skb(priv->netdev, context->echo_index);
+ context->echo_index = dev->max_tx_urbs;
+ --priv->active_tx_contexts;
+ netif_wake_queue(priv->netdev);
+
+ spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+}
+
+static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ u8 cmd_id)
+{
+ struct kvaser_cmd *cmd;
+ int err;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple);
+ cmd->id = cmd_id;
+ cmd->u.simple.channel = priv->channel;
+
+ err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len);
+ if (err)
+ kfree(cmd);
+
+ return err;
+}
+
+static void
+kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_err_summary *es,
+ struct can_frame *cf)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &priv->netdev->stats;
+ enum can_state cur_state, new_state, tx_state, rx_state;
+
+ netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
+
+ new_state = priv->can.state;
+ cur_state = priv->can.state;
+
+ if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (es->status & M16C_STATE_BUS_PASSIVE) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (es->status & M16C_STATE_BUS_ERROR) {
+ /* Guard against spurious error events after a busoff */
+ if (cur_state < CAN_STATE_BUS_OFF) {
+ if (es->txerr >= 128 || es->rxerr >= 128)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (es->txerr >= 96 || es->rxerr >= 96)
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ if (!es->status)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (new_state != cur_state) {
+ tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+ rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
+
+ can_change_state(priv->netdev, cf, tx_state, rx_state);
+ }
+
+ if (priv->can.restart_ms &&
+ cur_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ }
+ break;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
+ stats->tx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
+ stats->rx_errors++;
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
+ priv->can.can_stats.bus_error++;
+ break;
+ }
+
+ priv->bec.txerr = es->txerr;
+ priv->bec.rxerr = es->rxerr;
+}
+
+static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_usb_err_summary *es)
+{
+ struct can_frame *cf;
+ struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
+ .can_dlc = CAN_ERR_DLC };
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ struct kvaser_usb_net_priv *priv;
+ enum can_state old_state, new_state;
+
+ if (es->channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", es->channel);
+ return;
+ }
+
+ priv = dev->nets[es->channel];
+ stats = &priv->netdev->stats;
+
+ /* Update all of the CAN interface's state and error counters before
+ * trying any memory allocation that can actually fail with -ENOMEM.
+ *
+ * We send a temporary stack-allocated error CAN frame to
+ * can_change_state() for the very same reason.
+ *
+ * TODO: Split can_change_state() responsibility between updating the
+ * CAN interface's state and counters, and the setting up of CAN error
+ * frame ID and data to userspace. Remove stack allocation afterwards.
+ */
+ old_state = priv->can.state;
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ new_state = priv->can.state;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+ memcpy(cf, &tmp_cf, sizeof(*cf));
+
+ if (new_state != old_state) {
+ if (es->status &
+ (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ if (!priv->can.restart_ms)
+ kvaser_usb_leaf_simple_cmd_async(priv,
+ CMD_STOP_CHIP);
+ netif_carrier_off(priv->netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_carrier_on(priv->netdev);
+ }
+ }
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ if (es->leaf.error_factor) {
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+ if (es->leaf.error_factor & M16C_EF_ACKE)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (es->leaf.error_factor & M16C_EF_CRCE)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (es->leaf.error_factor & M16C_EF_FORME)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (es->leaf.error_factor & M16C_EF_STFE)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (es->leaf.error_factor & M16C_EF_BITE0)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (es->leaf.error_factor & M16C_EF_BITE1)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (es->leaf.error_factor & M16C_EF_TRE)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+ case KVASER_USBCAN:
+ if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR)
+ cf->can_id |= CAN_ERR_BUSERROR;
+ break;
+ }
+
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+/* For USBCAN, report an error to userspace if the channel's error counters
+ * have changed, or if we're the only channel seeing a bus error state.
+ */
+static void
+kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
+ struct kvaser_usb_err_summary *es)
+{
+ struct kvaser_usb_net_priv *priv;
+ unsigned int channel;
+ bool report_error;
+
+ channel = es->channel;
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ report_error = false;
+
+ if (es->txerr != priv->bec.txerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
+ report_error = true;
+ }
+ if (es->rxerr != priv->bec.rxerr) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
+ report_error = true;
+ }
+ if ((es->status & M16C_STATE_BUS_ERROR) &&
+ !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
+ es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
+ report_error = true;
+ }
+
+ if (report_error)
+ kvaser_usb_leaf_rx_error(dev, es);
+}
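
The arbitration above reduces to a simple predicate over the current and previous counters plus the two channels' bus-error status. A hedged sketch of that decision; names are illustrative only:

/* Illustration only: the counter-comparison arbitration described above,
 * reduced to its decision. prev_* would live in per-channel driver state.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_report(unsigned char txerr, unsigned char rxerr,
			  unsigned char prev_txerr, unsigned char prev_rxerr,
			  bool bus_error, bool other_ch_bus_error)
{
	return txerr != prev_txerr || rxerr != prev_rxerr ||
	       (bus_error && !other_ch_bus_error);
}

int main(void)
{
	/* counters unchanged, but only this channel sees a bus error */
	printf("%d\n", should_report(0, 0, 0, 0, true, false));	/* 1 */
	return 0;
}
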
+
+static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_err_summary es = { };
+
+ switch (cmd->id) {
+ /* Sometimes errors are sent as unsolicited chip state events */
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = cmd->u.usbcan.chip_state_event.channel;
+ es.status = cmd->u.usbcan.chip_state_event.status;
+ es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count;
+ es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ break;
+
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = 0;
+ es.status = cmd->u.usbcan.error_event.status_ch0;
+ es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
+ es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+ es.usbcan.other_ch_status =
+ cmd->u.usbcan.error_event.status_ch1;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+
+ /* The USBCAN firmware supports up to 2 channels.
+ * Now that ch0 was checked, check if ch1 has any errors.
+ */
+ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+ es.channel = 1;
+ es.status = cmd->u.usbcan.error_event.status_ch1;
+ es.txerr =
+ cmd->u.usbcan.error_event.tx_errors_count_ch1;
+ es.rxerr =
+ cmd->u.usbcan.error_event.rx_errors_count_ch1;
+ es.usbcan.other_ch_status =
+ cmd->u.usbcan.error_event.status_ch0;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ }
+ break;
+
+ default:
+ dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
+ }
+}
+
+static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_err_summary es = { };
+
+ switch (cmd->id) {
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = cmd->u.leaf.error_event.channel;
+ es.status = cmd->u.leaf.error_event.status;
+ es.txerr = cmd->u.leaf.error_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
+ es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+ break;
+ case CMD_LEAF_LOG_MESSAGE:
+ es.channel = cmd->u.leaf.log_message.channel;
+ es.status = cmd->u.leaf.log_message.data[0];
+ es.txerr = cmd->u.leaf.log_message.data[2];
+ es.rxerr = cmd->u.leaf.log_message.data[3];
+ es.leaf.error_factor = cmd->u.leaf.log_message.data[1];
+ break;
+ case CMD_CHIP_STATE_EVENT:
+ es.channel = cmd->u.leaf.chip_state_event.channel;
+ es.status = cmd->u.leaf.chip_state_event.status;
+ es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count;
+ es.leaf.error_factor = 0;
+ break;
+ default:
+ dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id);
+ return;
+ }
+
+ kvaser_usb_leaf_rx_error(dev, &es);
+}
+
+static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv,
+ const struct kvaser_cmd *cmd)
+{
+ if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR)) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
+ cmd->u.rx_can_header.flag);
+
+ stats->rx_errors++;
+ return;
+ }
+
+ if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN)
+ kvaser_usb_can_rx_over_error(priv->netdev);
+}
+
+static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ u8 channel = cmd->u.rx_can_header.channel;
+ const u8 *rx_data = NULL; /* GCC */
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ stats = &priv->netdev->stats;
+
+ if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
+ (dev->card_data.leaf.family == KVASER_LEAF &&
+ cmd->id == CMD_LEAF_LOG_MESSAGE)) {
+ kvaser_usb_leaf_leaf_rx_error(dev, cmd);
+ return;
+ } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR |
+ MSG_FLAG_OVERRUN)) {
+ kvaser_usb_leaf_rx_can_err(priv, cmd);
+ return;
+ } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
+ netdev_warn(priv->netdev,
+ "Unhandled frame (flags: 0x%02x)\n",
+ cmd->u.rx_can_header.flag);
+ return;
+ }
+
+ switch (dev->card_data.leaf.family) {
+ case KVASER_LEAF:
+ rx_data = cmd->u.leaf.rx_can.data;
+ break;
+ case KVASER_USBCAN:
+ rx_data = cmd->u.usbcan.rx_can.data;
+ break;
+ }
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ CMD_LEAF_LOG_MESSAGE) {
+ cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
+ if (cf->can_id & KVASER_EXTENDED_FRAME)
+ cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ cf->can_id &= CAN_SFF_MASK;
+
+ cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc);
+
+ if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &cmd->u.leaf.log_message.data,
+ cf->can_dlc);
+ } else {
+ cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f);
+
+ if (cmd->id == CMD_RX_EXT_MESSAGE) {
+ cf->can_id <<= 18;
+ cf->can_id |= ((rx_data[2] & 0x0f) << 14) |
+ ((rx_data[3] & 0xff) << 6) |
+ (rx_data[4] & 0x3f);
+ cf->can_id |= CAN_EFF_FLAG;
+ }
+
+ cf->can_dlc = get_can_dlc(rx_data[5]);
+
+ if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &rx_data[6], cf->can_dlc);
+ }
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ if (completion_done(&priv->start_comp) &&
+ netif_queue_stopped(priv->netdev)) {
+ netif_wake_queue(priv->netdev);
+ } else {
+ netif_start_queue(priv->netdev);
+ complete(&priv->start_comp);
+ }
+}
+
+static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ switch (cmd->id) {
+ case CMD_START_CHIP_REPLY:
+ kvaser_usb_leaf_start_chip_reply(dev, cmd);
+ break;
+
+ case CMD_STOP_CHIP_REPLY:
+ kvaser_usb_leaf_stop_chip_reply(dev, cmd);
+ break;
+
+ case CMD_RX_STD_MESSAGE:
+ case CMD_RX_EXT_MESSAGE:
+ kvaser_usb_leaf_rx_can_msg(dev, cmd);
+ break;
+
+ case CMD_LEAF_LOG_MESSAGE:
+ if (dev->card_data.leaf.family != KVASER_LEAF)
+ goto warn;
+ kvaser_usb_leaf_rx_can_msg(dev, cmd);
+ break;
+
+ case CMD_CHIP_STATE_EVENT:
+ case CMD_CAN_ERROR_EVENT:
+ if (dev->card_data.leaf.family == KVASER_LEAF)
+ kvaser_usb_leaf_leaf_rx_error(dev, cmd);
+ else
+ kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
+ break;
+
+ case CMD_TX_ACKNOWLEDGE:
+ kvaser_usb_leaf_tx_acknowledge(dev, cmd);
+ break;
+
+ /* Ignored commands */
+ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+ if (dev->card_data.leaf.family != KVASER_USBCAN)
+ goto warn;
+ break;
+
+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->card_data.leaf.family != KVASER_LEAF)
+ goto warn;
+ break;
+
+ default:
+warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
+ break;
+ }
+}
+
+static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
+ void *buf, int len)
+{
+ struct kvaser_cmd *cmd;
+ int pos = 0;
+
+ while (pos <= len - CMD_HEADER_LEN) {
+ cmd = buf + pos;
+
+ /* The Kvaser firmware can only read and write commands that
+ * do not cross the USB endpoint's wMaxPacketSize boundary.
+ * If a follow-up command crosses such a boundary, the firmware
+ * puts a placeholder zero-length command in its place and
+ * aligns the real command to the next max packet size.
+ *
+ * Handle such cases, or we're going to miss a significant
+ * number of events under heavy rx load on the bus.
+ */
+ if (cmd->len == 0) {
+ pos = round_up(pos, le16_to_cpu
+ (dev->bulk_in->wMaxPacketSize));
+ continue;
+ }
+
+ if (pos + cmd->len > len) {
+ dev_err_ratelimited(&dev->intf->dev, "Format error\n");
+ break;
+ }
+
+ kvaser_usb_leaf_handle_command(dev, cmd);
+ pos += cmd->len;
+ }
+}
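
The zero-length placeholder handling above relies on rounding the read position up to the next wMaxPacketSize multiple. A small sketch of that arithmetic, assuming an example 64-byte packet size:

/* Illustration only: skipping to the next max-packet boundary, as the
 * cmd->len == 0 case above does via round_up(). The 64-byte size is an
 * example, not a value taken from the driver.
 */
#include <stdio.h>

#define ROUND_UP(x, align) ((((x) + (align) - 1) / (align)) * (align))

int main(void)
{
	int wmaxpacket = 64;	/* example bulk-in wMaxPacketSize */
	int pos = 61;		/* placeholder seen 3 bytes before boundary */

	pos = ROUND_UP(pos, wmaxpacket);
	printf("next command at offset %d\n", pos);	/* 64 */
	return 0;
}
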
+
+static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_SET_CTRL_MODE;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode);
+ cmd->u.ctrl_mode.tid = 0xff;
+ cmd->u.ctrl_mode.channel = priv->channel;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
+ else
+ cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
+
+ rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->start_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->start_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->stop_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->stop_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel)
+{
+ return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel);
+}
+
+static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_FLUSH_QUEUE;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue);
+ cmd->u.flush_queue.channel = priv->channel;
+ cmd->u.flush_queue.flags = 0x00;
+
+ rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+
+ dev->cfg = &kvaser_usb_leaf_dev_cfg;
+ card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ return 0;
+}
+
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+ .name = "kvaser_usb",
+ .tseg1_min = KVASER_USB_TSEG1_MIN,
+ .tseg1_max = KVASER_USB_TSEG1_MAX,
+ .tseg2_min = KVASER_USB_TSEG2_MIN,
+ .tseg2_max = KVASER_USB_TSEG2_MAX,
+ .sjw_max = KVASER_USB_SJW_MAX,
+ .brp_min = KVASER_USB_BRP_MIN,
+ .brp_max = KVASER_USB_BRP_MAX,
+ .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int rc;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_SET_BUS_PARAMS;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
+ cmd->u.busparams.channel = priv->channel;
+ cmd->u.busparams.tid = 0xff;
+ cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+ cmd->u.busparams.sjw = bt->sjw;
+ cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+ cmd->u.busparams.tseg2 = bt->phase_seg2;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ cmd->u.busparams.no_samp = 3;
+ else
+ cmd->u.busparams.no_samp = 1;
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+ kfree(cmd);
+ return rc;
+}
+
+static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
+{
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int i;
+
+ iface_desc = &dev->intf->altsetting[0];
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint))
+ dev->bulk_in = endpoint;
+
+ if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint))
+ dev->bulk_out = endpoint;
+
+ /* use first bulk endpoint for in and out */
+ if (dev->bulk_in && dev->bulk_out)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ .dev_set_mode = kvaser_usb_leaf_set_mode,
+ .dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+ .dev_set_data_bittiming = NULL,
+ .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
+ .dev_init_card = kvaser_usb_leaf_init_card,
+ .dev_get_software_info = kvaser_usb_leaf_get_software_info,
+ .dev_get_software_details = NULL,
+ .dev_get_card_info = kvaser_usb_leaf_get_card_info,
+ .dev_get_capabilities = NULL,
+ .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
+ .dev_start_chip = kvaser_usb_leaf_start_chip,
+ .dev_stop_chip = kvaser_usb_leaf_stop_chip,
+ .dev_reset_chip = kvaser_usb_leaf_reset_chip,
+ .dev_flush_queue = kvaser_usb_leaf_flush_queue,
+ .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
+ .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
+ .clock = {
+ .freq = CAN_USB_CLOCK,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
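
As a worked example of the bus-parameter mapping in kvaser_usb_leaf_set_bittiming() above (tseg1 = prop_seg + phase_seg1), the sample point follows directly from the segment lengths; the numbers below are examples, not driver defaults:

/* Illustration only: deriving the busparams fields and the sample point
 * from a can_bittiming-style description; example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int prop_seg = 3, phase_seg1 = 4, phase_seg2 = 2;
	unsigned int tseg1 = prop_seg + phase_seg1;	/* as set_bittiming does */
	unsigned int total = 1 + tseg1 + phase_seg2;	/* incl. sync segment */

	/* sample point as a percentage of the bit time */
	printf("tseg1=%u tseg2=%u sample point=%u%%\n",
	       tseg1, phase_seg2, 100 * (1 + tseg1) / total);	/* 80% */
	return 0;
}
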
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index f530a80f5051..13238a72a338 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -423,6 +423,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
+ /* else: fall through */
case CAN_STATE_ERROR_WARNING:
if (n & PCAN_USB_ERROR_BUS_HEAVY) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 50e911428638..611f9d31be5d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -353,6 +353,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
default:
netdev_warn(netdev, "tx urb submitting failed err=%d\n",
err);
+ /* fall through */
case -ENOENT:
/* cable unplugged */
stats->tx_dropped++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 0105fbfea273..d516def846ab 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -141,8 +141,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...)
switch (id) {
case PCAN_USBPRO_TXMSG8:
i += 4;
+ /* fall through */
case PCAN_USBPRO_TXMSG4:
i += 4;
+ /* fall through */
case PCAN_USBPRO_TXMSG0:
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
new file mode 100644
index 000000000000..0678a38b1af4
--- /dev/null
+++ b/drivers/net/can/usb/ucan.c
@@ -0,0 +1,1613 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for Theobroma Systems UCAN devices, Protocol Version 3
+ *
+ * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH
+ *
+ *
+ * General Description:
+ *
+ * The USB Device uses three Endpoints:
+ *
+ * CONTROL Endpoint: Used to set up the device (start, stop,
+ * info, configure).
+ *
+ * IN Endpoint: The device sends CAN frame messages and device
+ * information using the IN endpoint.
+ *
+ * OUT Endpoint: The driver sends configuration requests and CAN
+ * frames on the OUT endpoint.
+ *
+ * Error Handling:
+ *
+ * If error reporting is turned on, the device encodes errors into CAN
+ * error frames (see uapi/linux/can/error.h) and sends them using the
+ * IN endpoint. The driver updates statistics and forwards them.
+ */
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#define UCAN_DRIVER_NAME "ucan"
+#define UCAN_MAX_RX_URBS 8
+/* the CAN controller needs a while to enable/disable the bus */
+#define UCAN_USB_CTL_PIPE_TIMEOUT 1000
+/* this driver currently supports protocol version 3 only */
+#define UCAN_PROTOCOL_VERSION_MIN 3
+#define UCAN_PROTOCOL_VERSION_MAX 3
+
+/* UCAN Message Definitions
+ * ------------------------
+ *
+ * ucan_message_out_t and ucan_message_in_t define the messages
+ * transmitted on the OUT and IN endpoints.
+ *
+ * Multibyte fields are transmitted in little-endian byte order.
+ *
+ * INTR Endpoint: a single uint32_t storing the current space in the fifo
+ *
+ * OUT Endpoint: a single message of type ucan_message_out_t is
+ * transmitted on the out endpoint
+ *
+ * IN Endpoint: multiple ucan_message_in_t messages concatenated in
+ * the following way:
+ *
+ * m[n].len <=> the length of message n (including the header), in bytes
+ * each m[n] is aligned to a 4-byte boundary, hence
+ * offset(m[0]) := 0;
+ * offset(m[n+1]) := offset(m[n]) + ((m[n].len + 3) & ~3)
+ *
+ * this implies that
+ * offset(m[n]) % 4 == 0
+ */
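
A minimal sketch of walking an IN-endpoint buffer with that alignment rule; the struct layout is simplified and a little-endian host is assumed:

/* Illustration only: iterating concatenated IN messages where each
 * message starts on a 4-byte boundary. Simplified header; not the
 * driver's types.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr {
	uint16_t len;	/* length including this header */
	uint8_t type;
	uint8_t subtype;
};

static void walk(const uint8_t *buf, size_t total)
{
	size_t pos = 0;

	while (pos + sizeof(struct hdr) <= total) {
		struct hdr h;

		memcpy(&h, buf + pos, sizeof(h));
		if (h.len < sizeof(h) || pos + h.len > total)
			break;	/* malformed */
		printf("msg type %u, len %u at offset %zu\n",
		       h.type, h.len, pos);
		pos += (h.len + 3) & ~(size_t)3;	/* next 4-byte boundary */
	}
}

int main(void)
{
	uint8_t buf[16] = { 5, 0, 2, 0, 0xaa,	/* 5-byte msg, padded to 8 */
			    0, 0, 0,
			    6, 0, 1, 0, 1, 2 };	/* 6-byte msg */

	walk(buf, 14);
	return 0;
}
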
+
+/* Device Global Commands */
+enum {
+ UCAN_DEVICE_GET_FW_STRING = 0,
+};
+
+/* UCAN Commands */
+enum {
+ /* start the can transceiver - val defines the operation mode */
+ UCAN_COMMAND_START = 0,
+ /* cancel pending transmissions and stop the can transceiver */
+ UCAN_COMMAND_STOP = 1,
+ /* send can transceiver into low-power sleep mode */
+ UCAN_COMMAND_SLEEP = 2,
+ /* wake up can transceiver from low-power sleep mode */
+ UCAN_COMMAND_WAKEUP = 3,
+ /* reset the can transceiver */
+ UCAN_COMMAND_RESET = 4,
+ /* get piece of info from the can transceiver - subcmd defines what
+ * piece
+ */
+ UCAN_COMMAND_GET = 5,
+ /* clear or disable hardware filter - subcmd defines which of the two */
+ UCAN_COMMAND_FILTER = 6,
+ /* Setup bittiming */
+ UCAN_COMMAND_SET_BITTIMING = 7,
+ /* recover from bus-off state */
+ UCAN_COMMAND_RESTART = 8,
+};
+
+/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap).
+ * Undefined bits must be set to 0.
+ */
+enum {
+ UCAN_MODE_LOOPBACK = BIT(0),
+ UCAN_MODE_SILENT = BIT(1),
+ UCAN_MODE_3_SAMPLES = BIT(2),
+ UCAN_MODE_ONE_SHOT = BIT(3),
+ UCAN_MODE_BERR_REPORT = BIT(4),
+};
+
+/* UCAN_COMMAND_GET subcommands */
+enum {
+ UCAN_COMMAND_GET_INFO = 0,
+ UCAN_COMMAND_GET_PROTOCOL_VERSION = 1,
+};
+
+/* UCAN_COMMAND_FILTER subcommands */
+enum {
+ UCAN_FILTER_CLEAR = 0,
+ UCAN_FILTER_DISABLE = 1,
+ UCAN_FILTER_ENABLE = 2,
+};
+
+/* OUT endpoint message types */
+enum {
+ UCAN_OUT_TX = 2, /* transmit a CAN frame */
+};
+
+/* IN endpoint message types */
+enum {
+ UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */
+ UCAN_IN_RX = 2, /* CAN frame received */
+};
+
+struct ucan_ctl_cmd_start {
+ __le16 mode; /* OR-ing any of UCAN_MODE_* */
+} __packed;
+
+struct ucan_ctl_cmd_set_bittiming {
+ __le32 tq; /* Time quanta (TQ) in nanoseconds */
+ __le16 brp; /* TQ Prescaler */
+ __le16 sample_point; /* Sample point in tenths of a percent */
+ u8 prop_seg; /* Propagation segment in TQs */
+ u8 phase_seg1; /* Phase buffer segment 1 in TQs */
+ u8 phase_seg2; /* Phase buffer segment 2 in TQs */
+ u8 sjw; /* Synchronisation jump width in TQs */
+} __packed;
+
+struct ucan_ctl_cmd_device_info {
+ __le32 freq; /* Clock Frequency for tq generation */
+ u8 tx_fifo; /* Size of the transmission fifo */
+ u8 sjw_max; /* can_bittiming fields... */
+ u8 tseg1_min;
+ u8 tseg1_max;
+ u8 tseg2_min;
+ u8 tseg2_max;
+ __le16 brp_inc;
+ __le32 brp_min;
+ __le32 brp_max; /* ...can_bittiming fields */
+ __le16 ctrlmodes; /* supported control modes */
+ __le16 hwfilter; /* Number of HW filter banks */
+ __le16 rxmboxes; /* Number of receive Mailboxes */
+} __packed;
+
+struct ucan_ctl_cmd_get_protocol_version {
+ __le32 version;
+} __packed;
+
+union ucan_ctl_payload {
+ /* Setup Bittiming
+ * bmRequest == UCAN_COMMAND_START
+ */
+ struct ucan_ctl_cmd_start cmd_start;
+ /* Setup Bittiming
+ * bmRequest == UCAN_COMMAND_SET_BITTIMING
+ */
+ struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming;
+ /* Get Device Information
+ * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO
+ */
+ struct ucan_ctl_cmd_device_info cmd_get_device_info;
+ /* Get Protocol Version
+ * bmRequest == UCAN_COMMAND_GET;
+ * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION
+ */
+ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
+
+ u8 raw[128];
+} __packed;
+
+enum {
+ UCAN_TX_COMPLETE_SUCCESS = BIT(0),
+};
+
+/* Transmission Complete within ucan_message_in */
+struct ucan_tx_complete_entry_t {
+ u8 echo_index;
+ u8 flags;
+} __packed __aligned(0x2);
+
+/* CAN Data message format within ucan_message_in/out */
+struct ucan_can_msg {
+ /* note DLC is computed by
+ * msg.len - sizeof (msg.len)
+ * - sizeof (msg.type)
+ * - sizeof (msg.can_msg.id)
+ */
+ __le32 id;
+
+ union {
+ u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */
+ u8 dlc; /* RTR dlc */
+ };
+} __packed;
+
+/* OUT Endpoint, outbound messages */
+struct ucan_message_out {
+ __le16 len; /* Length of the content, including the header */
+ u8 type; /* UCAN_OUT_TX and friends */
+ u8 subtype; /* command sub type */
+
+ union {
+ /* Transmit CAN frame
+ * (type == UCAN_OUT_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
+ * subtype stores the echo id
+ */
+ struct ucan_can_msg can_msg;
+ } msg;
+} __packed __aligned(0x4);
+
+/* IN Endpoint, inbound messages */
+struct ucan_message_in {
+ __le16 len; /* Length of the content, including the header */
+ u8 type; /* UCAN_IN_RX and friends */
+ u8 subtype; /* command sub type */
+
+ union {
+ /* CAN Frame received
+ * (type == UCAN_IN_RX)
+ * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
+ */
+ struct ucan_can_msg can_msg;
+
+ /* CAN transmission complete
+ * (type == UCAN_IN_TX_COMPLETE)
+ */
+ struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
+ } __aligned(0x4) msg;
+} __packed;
+
+/* Macros to calculate message lengths */
+#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg)
+
+#define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg)
+#define UCAN_IN_LEN(member) (UCAN_IN_HDR_SIZE + sizeof(member))
+
+struct ucan_priv;
+
+/* Context Information for transmission URBs */
+struct ucan_urb_context {
+ struct ucan_priv *up;
+ u8 dlc;
+ bool allocated;
+};
+
+/* Information reported by the USB device */
+struct ucan_device_info {
+ struct can_bittiming_const bittiming_const;
+ u8 tx_fifo;
+};
+
+/* Driver private data */
+struct ucan_priv {
+ /* must be the first member */
+ struct can_priv can;
+
+ /* linux USB device structures */
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct net_device *netdev;
+
+ /* lock for can->echo_skb (used around
+ * can_put/get/free_echo_skb)
+ */
+ spinlock_t echo_skb_lock;
+
+ /* USB device information */
+ u8 intf_index;
+ u8 in_ep_addr;
+ u8 out_ep_addr;
+ u16 in_ep_size;
+
+ /* transmission and reception buffers */
+ struct usb_anchor rx_urbs;
+ struct usb_anchor tx_urbs;
+
+ union ucan_ctl_payload *ctl_msg_buffer;
+ struct ucan_device_info device_info;
+
+ /* transmission control information and locks */
+ spinlock_t context_lock;
+ unsigned int available_tx_urbs;
+ struct ucan_urb_context *context_array;
+};
+
+static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len)
+{
+ if (le32_to_cpu(msg->id) & CAN_RTR_FLAG)
+ return get_can_dlc(msg->dlc);
+ else
+ return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id)));
+}
+
+static void ucan_release_context_array(struct ucan_priv *up)
+{
+ if (!up->context_array)
+ return;
+
+ /* no lock needed: the driver is currently opening or closing */
+ up->available_tx_urbs = 0;
+
+ kfree(up->context_array);
+ up->context_array = NULL;
+}
+
+static int ucan_alloc_context_array(struct ucan_priv *up)
+{
+ int i;
+
+ /* release contexts if any */
+ ucan_release_context_array(up);
+
+ up->context_array = kcalloc(up->device_info.tx_fifo,
+ sizeof(*up->context_array),
+ GFP_KERNEL);
+ if (!up->context_array) {
+ netdev_err(up->netdev,
+ "Not enough memory to allocate tx contexts\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < up->device_info.tx_fifo; i++) {
+ up->context_array[i].allocated = false;
+ up->context_array[i].up = up;
+ }
+
+ /* no lock needed: the driver is currently opening */
+ up->available_tx_urbs = up->device_info.tx_fifo;
+
+ return 0;
+}
+
+static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up)
+{
+ int i;
+ unsigned long flags;
+ struct ucan_urb_context *ret = NULL;
+
+ if (WARN_ON_ONCE(!up->context_array))
+ return NULL;
+
+ /* execute context operation atomically */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ for (i = 0; i < up->device_info.tx_fifo; i++) {
+ if (!up->context_array[i].allocated) {
+ /* update context */
+ ret = &up->context_array[i];
+ up->context_array[i].allocated = true;
+
+ /* stop queue if necessary */
+ up->available_tx_urbs--;
+ if (!up->available_tx_urbs)
+ netif_stop_queue(up->netdev);
+
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+ return ret;
+}
+
+static bool ucan_release_context(struct ucan_priv *up,
+ struct ucan_urb_context *ctx)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ if (WARN_ON_ONCE(!up->context_array))
+ return false;
+
+ /* execute context operation atomically */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ /* only release allocated contexts; an unallocated context
+ * means the device may have sent garbage
+ */
+ if (ctx->allocated) {
+ ctx->allocated = false;
+
+ /* check if the queue needs to be woken */
+ if (!up->available_tx_urbs)
+ netif_wake_queue(up->netdev);
+ up->available_tx_urbs++;
+
+ ret = true;
+ }
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+ return ret;
+}
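
The allocate/release pair above implements classic tx flow control: stop the queue when the last context is taken, wake it on the first release. A reduced model of that discipline, where a plain counter stands in for available_tx_urbs:

/* Illustration only: the stop/wake discipline of ucan_alloc_context()
 * and ucan_release_context(), modeled with a counter.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int available = 2;	/* example fifo depth */

static bool alloc_slot(void)
{
	if (!available)
		return false;
	if (!--available)
		printf("queue stopped\n");	/* netif_stop_queue() */
	return true;
}

static void release_slot(void)
{
	if (!available)
		printf("queue woken\n");	/* netif_wake_queue() */
	available++;
}

int main(void)
{
	alloc_slot();
	alloc_slot();	/* prints "queue stopped" */
	release_slot();	/* prints "queue woken" */
	return 0;
}
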
+
+static int ucan_ctrl_command_out(struct ucan_priv *up,
+ u8 cmd, u16 subcmd, u16 datalen)
+{
+ return usb_control_msg(up->udev,
+ usb_sndctrlpipe(up->udev, 0),
+ cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ subcmd,
+ up->intf_index,
+ up->ctl_msg_buffer,
+ datalen,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+}
+
+static int ucan_device_request_in(struct ucan_priv *up,
+ u8 cmd, u16 subcmd, u16 datalen)
+{
+ return usb_control_msg(up->udev,
+ usb_rcvctrlpipe(up->udev, 0),
+ cmd,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ subcmd,
+ 0,
+ up->ctl_msg_buffer,
+ datalen,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+}
+
+/* Parse the device information structure reported by the device and
+ * setup private variables accordingly
+ */
+static void ucan_parse_device_info(struct ucan_priv *up,
+ struct ucan_ctl_cmd_device_info *device_info)
+{
+ struct can_bittiming_const *bittiming =
+ &up->device_info.bittiming_const;
+ u16 ctrlmodes;
+
+ /* store the data */
+ up->can.clock.freq = le32_to_cpu(device_info->freq);
+ up->device_info.tx_fifo = device_info->tx_fifo;
+ strcpy(bittiming->name, "ucan");
+ bittiming->tseg1_min = device_info->tseg1_min;
+ bittiming->tseg1_max = device_info->tseg1_max;
+ bittiming->tseg2_min = device_info->tseg2_min;
+ bittiming->tseg2_max = device_info->tseg2_max;
+ bittiming->sjw_max = device_info->sjw_max;
+ bittiming->brp_min = le32_to_cpu(device_info->brp_min);
+ bittiming->brp_max = le32_to_cpu(device_info->brp_max);
+ bittiming->brp_inc = le16_to_cpu(device_info->brp_inc);
+
+ ctrlmodes = le16_to_cpu(device_info->ctrlmodes);
+
+ up->can.ctrlmode_supported = 0;
+
+ if (ctrlmodes & UCAN_MODE_LOOPBACK)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+ if (ctrlmodes & UCAN_MODE_SILENT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+ if (ctrlmodes & UCAN_MODE_3_SAMPLES)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ if (ctrlmodes & UCAN_MODE_ONE_SHOT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+ if (ctrlmodes & UCAN_MODE_BERR_REPORT)
+ up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
+}
+
+/* Handle a CAN error frame that we have received from the device.
+ * Returns true if the can state has changed.
+ */
+static bool ucan_handle_error_frame(struct ucan_priv *up,
+ struct ucan_message_in *m,
+ canid_t canid)
+{
+ enum can_state new_state = up->can.state;
+ struct net_device_stats *net_stats = &up->netdev->stats;
+ struct can_device_stats *can_stats = &up->can.can_stats;
+
+ if (canid & CAN_ERR_LOSTARB)
+ can_stats->arbitration_lost++;
+
+ if (canid & CAN_ERR_BUSERROR)
+ can_stats->bus_error++;
+
+ if (canid & CAN_ERR_ACK)
+ net_stats->tx_errors++;
+
+ if (canid & CAN_ERR_BUSOFF)
+ new_state = CAN_STATE_BUS_OFF;
+
+ /* controller problems, details in data[1] */
+ if (canid & CAN_ERR_CRTL) {
+ u8 d1 = m->msg.can_msg.data[1];
+
+ if (d1 & CAN_ERR_CRTL_RX_OVERFLOW)
+ net_stats->rx_over_errors++;
+
+ /* controller state bits: if multiple are set the worst wins */
+ if (d1 & CAN_ERR_CRTL_ACTIVE)
+ new_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING))
+ new_state = CAN_STATE_ERROR_WARNING;
+
+ if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ }
+
+ /* protocol error, details in data[2] */
+ if (canid & CAN_ERR_PROT) {
+ u8 d2 = m->msg.can_msg.data[2];
+
+ if (d2 & CAN_ERR_PROT_TX)
+ net_stats->tx_errors++;
+ else
+ net_stats->rx_errors++;
+ }
+
+ /* no state change - we are done */
+ if (up->can.state == new_state)
+ return false;
+
+ /* we switched into a better state */
+ if (up->can.state > new_state) {
+ up->can.state = new_state;
+ return true;
+ }
+
+ /* we switched into a worse state */
+ up->can.state = new_state;
+ switch (new_state) {
+ case CAN_STATE_BUS_OFF:
+ can_stats->bus_off++;
+ can_bus_off(up->netdev);
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ can_stats->error_passive++;
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ can_stats->error_warning++;
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
+/* Callback on reception of a can frame via the IN endpoint
+ *
+ * This function allocates an skb and transfers it to the Linux
+ * network stack
+ */
+static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
+{
+ int len;
+ canid_t canid;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &up->netdev->stats;
+
+ /* get the contents of the length field */
+ len = le16_to_cpu(m->len);
+
+ /* check sanity */
+ if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) {
+ netdev_warn(up->netdev, "invalid input message len: %d\n", len);
+ return;
+ }
+
+ /* handle error frames */
+ canid = le32_to_cpu(m->msg.can_msg.id);
+ if (canid & CAN_ERR_FLAG) {
+ bool busstate_changed = ucan_handle_error_frame(up, m, canid);
+
+ /* if berr-reporting is off only state changes get through */
+ if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ !busstate_changed)
+ return;
+ } else {
+ canid_t canid_mask;
+ /* compute the mask for canid */
+ canid_mask = CAN_RTR_FLAG;
+ if (canid & CAN_EFF_FLAG)
+ canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ canid_mask |= CAN_SFF_MASK;
+
+ if (canid & ~canid_mask)
+ netdev_warn(up->netdev,
+ "unexpected bits set (canid %x, mask %x)",
+ canid, canid_mask);
+
+ canid &= canid_mask;
+ }
+
+ /* allocate skb */
+ skb = alloc_can_skb(up->netdev, &cf);
+ if (!skb)
+ return;
+
+ /* fill the can frame */
+ cf->can_id = canid;
+
+ /* compute DLC taking RTR_FLAG into account */
+ cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len);
+
+ /* copy the payload of non-RTR frames and of error frames */
+ if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG))
+ memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc);
+
+ /* update rx statistics (error frames are counted, too) */
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ /* pass it to Linux */
+ netif_rx(skb);
+}
+
+/* callback indicating completed transmission */
+static void ucan_tx_complete_msg(struct ucan_priv *up,
+ struct ucan_message_in *m)
+{
+ unsigned long flags;
+ u16 count, i;
+ u8 echo_index, dlc;
+ u16 len = le16_to_cpu(m->len);
+
+ struct ucan_urb_context *context;
+
+ if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) {
+ netdev_err(up->netdev, "invalid tx complete length\n");
+ return;
+ }
+
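+ /* each tx-complete entry is two bytes: a one-byte echo_index
+ * followed by a one-byte flags field, hence len/2 entries
+ */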
+ count = (len - UCAN_IN_HDR_SIZE) / 2;
+ for (i = 0; i < count; i++) {
+ /* reject echo ids that we never submitted */
+ echo_index = m->msg.can_tx_complete_msg[i].echo_index;
+ if (echo_index >= up->device_info.tx_fifo) {
+ up->netdev->stats.tx_errors++;
+ netdev_err(up->netdev,
+ "invalid echo_index %d received\n",
+ echo_index);
+ continue;
+ }
+
+ /* gather information from the context */
+ context = &up->context_array[echo_index];
+ dlc = READ_ONCE(context->dlc);
+
+ /* Release context and restart queue if necessary.
+ * Also check if the context was allocated
+ */
+ if (!ucan_release_context(up, context))
+ continue;
+
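+ /* echo_skb_lock serializes the echo-skb handling against
+ * ucan_start_xmit() and ucan_write_bulk_callback(), which may
+ * free the same echo slot on USB errors
+ */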
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ if (m->msg.can_tx_complete_msg[i].flags &
+ UCAN_TX_COMPLETE_SUCCESS) {
+ /* update statistics */
+ up->netdev->stats.tx_packets++;
+ up->netdev->stats.tx_bytes += dlc;
+ can_get_echo_skb(up->netdev, echo_index);
+ } else {
+ up->netdev->stats.tx_dropped++;
+ can_free_echo_skb(up->netdev, echo_index);
+ }
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+ }
+}
+
+/* callback on reception of a USB message */
+static void ucan_read_bulk_callback(struct urb *urb)
+{
+ int ret;
+ int pos;
+ struct ucan_priv *up = urb->context;
+ struct net_device *netdev = up->netdev;
+ struct ucan_message_in *m;
+
+ /* the device is not up and the driver should not receive any
+ * data on the bulk in pipe
+ */
+ if (WARN_ON(!up->context_array)) {
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+ return;
+ }
+
+ /* check URB status */
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ case -ETIME:
+ /* urb is not resubmitted -> free dma data */
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+ netdev_dbg(up->netdev, "not resumbmitting urb; status: %d\n",
+ urb->status);
+ return;
+ default:
+ goto resubmit;
+ }
+
+ /* sanity check */
+ if (!netif_device_present(netdev))
+ return;
+
+ /* iterate over input */
+ pos = 0;
+ while (pos < urb->actual_length) {
+ int len;
+
+ /* check sanity (length of header) */
+ if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) {
+ netdev_warn(up->netdev,
+ "invalid message (short; no hdr; l:%d)\n",
+ urb->actual_length);
+ goto resubmit;
+ }
+
+ /* setup the message address */
+ m = (struct ucan_message_in *)
+ ((u8 *)urb->transfer_buffer + pos);
+ len = le16_to_cpu(m->len);
+
+ /* check sanity (length of content) */
+ if (urb->actual_length - pos < len) {
+ netdev_warn(up->netdev,
+ "invalid message (short; no data; l:%d)\n",
+ urb->actual_length);
+ print_hex_dump(KERN_WARNING,
+ "raw data: ",
+ DUMP_PREFIX_ADDRESS,
+ 16,
+ 1,
+ urb->transfer_buffer,
+ urb->actual_length,
+ true);
+
+ goto resubmit;
+ }
+
+ switch (m->type) {
+ case UCAN_IN_RX:
+ ucan_rx_can_msg(up, m);
+ break;
+ case UCAN_IN_TX_COMPLETE:
+ ucan_tx_complete_msg(up, m);
+ break;
+ default:
+ netdev_warn(up->netdev,
+ "invalid message (type; t:%d)\n",
+ m->type);
+ break;
+ }
+
+ /* proceed to next message */
+ pos += len;
+ /* align to 4 byte boundary */
+ pos = round_up(pos, 4);
+ }
+
+resubmit:
+ /* resubmit urb when done */
+ usb_fill_bulk_urb(urb, up->udev,
+ usb_rcvbulkpipe(up->udev,
+ up->in_ep_addr),
+ urb->transfer_buffer,
+ up->in_ep_size,
+ ucan_read_bulk_callback,
+ up);
+
+ usb_anchor_urb(urb, &up->rx_urbs);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+
+ if (ret < 0) {
+ netdev_err(up->netdev,
+ "failed resubmitting read bulk urb: %d\n",
+ ret);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ if (ret == -ENODEV)
+ netif_device_detach(netdev);
+ }
+}
+
+/* callback after transmission of a USB message */
+static void ucan_write_bulk_callback(struct urb *urb)
+{
+ unsigned long flags;
+ struct ucan_priv *up;
+ struct ucan_urb_context *context = urb->context;
+
+ /* get the urb context */
+ if (WARN_ON_ONCE(!context))
+ return;
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev,
+ sizeof(struct ucan_message_out),
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ up = context->up;
+ if (WARN_ON_ONCE(!up))
+ return;
+
+ /* sanity check */
+ if (!netif_device_present(up->netdev))
+ return;
+
+ /* transmission failed (USB - the device will not send a TX complete) */
+ if (urb->status) {
+ netdev_warn(up->netdev,
+ "failed to transmit USB message to device: %d\n",
+ urb->status);
+
+ /* update counters and clean up */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_free_echo_skb(up->netdev, context - up->context_array);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ up->netdev->stats.tx_dropped++;
+
+ /* release context and restart the queue if necessary */
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "urb failed, failed to release context\n");
+ }
+}
+
+static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs)
+{
+ int i;
+
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ if (urbs[i]) {
+ usb_unanchor_urb(urbs[i]);
+ usb_free_coherent(up->udev,
+ up->in_ep_size,
+ urbs[i]->transfer_buffer,
+ urbs[i]->transfer_dma);
+ usb_free_urb(urbs[i]);
+ }
+ }
+
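+ /* clear the array so a repeated cleanup pass does not free
+ * stale pointers twice
+ */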
+ memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
+}
+
+static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up,
+ struct urb **urbs)
+{
+ int i;
+
+ memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
+
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ void *buf;
+
+ urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urbs[i])
+ goto err;
+
+ buf = usb_alloc_coherent(up->udev,
+ up->in_ep_size,
+ GFP_KERNEL, &urbs[i]->transfer_dma);
+ if (!buf) {
+ /* cleanup this urb */
+ usb_free_urb(urbs[i]);
+ urbs[i] = NULL;
+ goto err;
+ }
+
+ usb_fill_bulk_urb(urbs[i], up->udev,
+ usb_rcvbulkpipe(up->udev,
+ up->in_ep_addr),
+ buf,
+ up->in_ep_size,
+ ucan_read_bulk_callback,
+ up);
+
+ urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urbs[i], &up->rx_urbs);
+ }
+ return 0;
+
+err:
+ /* cleanup other unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+ return -ENOMEM;
+}
+
+/* Submits rx urbs with the semantic: Either submit all, or clean up
+ * everything. In case of errors, submitted urbs are killed and all urbs
+ * in the array are freed. In case of no errors, every entry in the urb
+ * array is set to NULL.
+ */
+static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs)
+{
+ int i, ret;
+
+ /* Iterate over all urbs to submit. On success remove the urb
+ * from the list.
+ */
+ for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
+ ret = usb_submit_urb(urbs[i], GFP_KERNEL);
+ if (ret) {
+ netdev_err(up->netdev,
+ "could not submit urb; code: %d\n",
+ ret);
+ goto err;
+ }
+
+ /* The urb is anchored; drop our reference so the
+ * USB core takes care of freeing it
+ */
+ usb_free_urb(urbs[i]);
+ urbs[i] = NULL;
+ }
+ return 0;
+
+err:
+ /* Cleanup unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+
+ /* Kill urbs that are already submitted */
+ usb_kill_anchored_urbs(&up->rx_urbs);
+
+ return ret;
+}
+
+/* Open the network device */
+static int ucan_open(struct net_device *netdev)
+{
+ int ret, ret_cleanup;
+ u16 ctrlmode;
+ struct urb *urbs[UCAN_MAX_RX_URBS];
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ ret = ucan_alloc_context_array(up);
+ if (ret)
+ return ret;
+
+ /* Allocate and prepare IN URBs - allocated and anchored
+ * urbs are stored in urbs[] for cleanup
+ */
+ ret = ucan_prepare_and_anchor_rx_urbs(up, urbs);
+ if (ret)
+ goto err_contexts;
+
+ /* Check the control mode */
+ ctrlmode = 0;
+ if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ ctrlmode |= UCAN_MODE_LOOPBACK;
+ if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrlmode |= UCAN_MODE_SILENT;
+ if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ ctrlmode |= UCAN_MODE_3_SAMPLES;
+ if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ ctrlmode |= UCAN_MODE_ONE_SHOT;
+
+ /* Enable this in any case - filtering is done within the
+ * receive path
+ */
+ ctrlmode |= UCAN_MODE_BERR_REPORT;
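+ /* (error frames are dropped in ucan_rx_can_msg() when
+ * userspace did not request berr-reporting)
+ */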
+ up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode);
+
+ /* Driver is ready to receive data - start the USB device */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2);
+ if (ret < 0) {
+ netdev_err(up->netdev,
+ "could not start device, code: %d\n",
+ ret);
+ goto err_reset;
+ }
+
+ /* Call CAN layer open */
+ ret = open_candev(netdev);
+ if (ret)
+ goto err_stop;
+
+ /* Driver is ready to receive data. Submit RX URBS */
+ ret = ucan_submit_rx_urbs(up, urbs);
+ if (ret)
+ goto err_stop;
+
+ up->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Start the network queue */
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_stop:
+ /* The device has already been started, stop it */
+ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
+ if (ret_cleanup < 0)
+ netdev_err(up->netdev,
+ "could not stop device, code: %d\n",
+ ret_cleanup);
+
+err_reset:
+ /* The device might have received data, reset it for
+ * consistent state
+ */
+ ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret_cleanup < 0)
+ netdev_err(up->netdev,
+ "could not reset device, code: %d\n",
+ ret_cleanup);
+
+ /* clean up unsubmitted urbs */
+ ucan_cleanup_rx_urbs(up, urbs);
+
+err_contexts:
+ ucan_release_context_array(up);
+ return ret;
+}
+
+static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
+ struct ucan_urb_context *context,
+ struct can_frame *cf,
+ u8 echo_index)
+{
+ int mlen;
+ struct urb *urb;
+ struct ucan_message_out *m;
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(up->netdev, "no memory left for URBs\n");
+ return NULL;
+ }
+
+ m = usb_alloc_coherent(up->udev,
+ sizeof(struct ucan_message_out),
+ GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!m) {
+ netdev_err(up->netdev, "no memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ /* build the USB message */
+ m->type = UCAN_OUT_TX;
+ m->msg.can_msg.id = cpu_to_le32(cf->can_id);
+
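+ /* the wire format ends after the dlc field for RTR frames;
+ * for data frames the payload directly follows the id
+ */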
+ if (cf->can_id & CAN_RTR_FLAG) {
+ mlen = UCAN_OUT_HDR_SIZE +
+ offsetof(struct ucan_can_msg, dlc) +
+ sizeof(m->msg.can_msg.dlc);
+ m->msg.can_msg.dlc = cf->can_dlc;
+ } else {
+ mlen = UCAN_OUT_HDR_SIZE +
+ sizeof(m->msg.can_msg.id) + cf->can_dlc;
+ memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc);
+ }
+ m->len = cpu_to_le16(mlen);
+
+ context->dlc = cf->can_dlc;
+
+ m->subtype = echo_index;
+
+ /* build the urb */
+ usb_fill_bulk_urb(urb, up->udev,
+ usb_sndbulkpipe(up->udev,
+ up->out_ep_addr),
+ m, mlen, ucan_write_bulk_callback, context);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ return urb;
+}
+
+static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb)
+{
+ usb_free_coherent(up->udev, sizeof(struct ucan_message_out),
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+}
+
+/* callback when Linux needs to send a can frame */
+static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ unsigned long flags;
+ int ret;
+ u8 echo_index;
+ struct urb *urb;
+ struct ucan_urb_context *context;
+ struct ucan_priv *up = netdev_priv(netdev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* check skb */
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* allocate a context and slow down tx path, if fifo state is low */
+ context = ucan_alloc_context(up);
+ if (WARN_ON_ONCE(!context))
+ return NETDEV_TX_BUSY;
+
+ /* only derive the echo index from the context pointer after the
+ * NULL check above
+ */
+ echo_index = context - up->context_array;
+
+ /* prepare urb for transmission */
+ urb = ucan_prepare_tx_urb(up, context, cf, echo_index);
+ if (!urb)
+ goto drop;
+
+ /* put the skb on can loopback stack */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_put_echo_skb(skb, up->netdev, echo_index);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ /* transmit it */
+ usb_anchor_urb(urb, &up->tx_urbs);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* cleanup urb */
+ if (ret) {
+ /* on error, clean up */
+ usb_unanchor_urb(urb);
+ ucan_clean_up_tx_urb(up, urb);
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "xmit err: failed to release context\n");
+
+ /* remove the skb from the echo stack - this also
+ * frees the skb
+ */
+ spin_lock_irqsave(&up->echo_skb_lock, flags);
+ can_free_echo_skb(up->netdev, echo_index);
+ spin_unlock_irqrestore(&up->echo_skb_lock, flags);
+
+ if (ret == -ENODEV) {
+ netif_device_detach(up->netdev);
+ } else {
+ netdev_warn(up->netdev,
+ "xmit err: failed to submit urb %d\n",
+ ret);
+ up->netdev->stats.tx_dropped++;
+ }
+ return NETDEV_TX_OK;
+ }
+
+ netif_trans_update(netdev);
+
+ /* release ref, as we do not need the urb anymore */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+drop:
+ if (!ucan_release_context(up, context))
+ netdev_err(up->netdev,
+ "xmit drop: failed to release context\n");
+ dev_kfree_skb(skb);
+ up->netdev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+/* Device goes down
+ *
+ * Clean up used resources
+ */
+static int ucan_close(struct net_device *netdev)
+{
+ int ret;
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ up->can.state = CAN_STATE_STOPPED;
+
+ /* stop sending data */
+ usb_kill_anchored_urbs(&up->tx_urbs);
+
+ /* stop receiving data */
+ usb_kill_anchored_urbs(&up->rx_urbs);
+
+ /* stop and reset can device */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
+ if (ret < 0)
+ netdev_err(up->netdev,
+ "could not stop device, code: %d\n",
+ ret);
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret < 0)
+ netdev_err(up->netdev,
+ "could not reset device, code: %d\n",
+ ret);
+
+ netif_stop_queue(netdev);
+
+ ucan_release_context_array(up);
+
+ close_candev(up->netdev);
+ return 0;
+}
+
+/* CAN driver callbacks */
+static const struct net_device_ops ucan_netdev_ops = {
+ .ndo_open = ucan_open,
+ .ndo_stop = ucan_close,
+ .ndo_start_xmit = ucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+/* Request to set bittiming
+ *
+ * This function generates a USB set-bittiming message and transmits
+ * it to the device
+ */
+static int ucan_set_bittiming(struct net_device *netdev)
+{
+ int ret;
+ struct ucan_priv *up = netdev_priv(netdev);
+ struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming;
+
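+ /* the CAN core has already validated the bittiming values
+ * against the constants reported by the device (see
+ * ucan_parse_device_info())
+ */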
+ cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming;
+ cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq);
+ cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp);
+ cmd_set_bittiming->sample_point =
+ cpu_to_le16(up->can.bittiming.sample_point);
+ cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg;
+ cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1;
+ cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2;
+ cmd_set_bittiming->sjw = up->can.bittiming.sjw;
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0,
+ sizeof(*cmd_set_bittiming));
+ return (ret < 0) ? ret : 0;
+}
+
+/* Restart the device to get it out of BUS-OFF state.
+ * Called when the user runs "ip link set can1 type can restart".
+ */
+static int ucan_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ int ret;
+ unsigned long flags;
+ struct ucan_priv *up = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ netdev_dbg(up->netdev, "restarting device\n");
+
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0);
+ up->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* check if queue can be restarted,
+ * up->available_tx_urbs must be protected by the
+ * lock
+ */
+ spin_lock_irqsave(&up->context_lock, flags);
+
+ if (up->available_tx_urbs > 0)
+ netif_wake_queue(up->netdev);
+
+ spin_unlock_irqrestore(&up->context_lock, flags);
+
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Probe the device, reset it and gather general device information */
+static int ucan_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret;
+ int i;
+ u32 protocol_version;
+ struct usb_device *udev;
+ struct net_device *netdev;
+ struct usb_host_interface *iface_desc;
+ struct ucan_priv *up;
+ struct usb_endpoint_descriptor *ep;
+ u16 in_ep_size;
+ u16 out_ep_size;
+ u8 in_ep_addr;
+ u8 out_ep_addr;
+ union ucan_ctl_payload *ctl_msg_buffer;
+ char firmware_str[sizeof(union ucan_ctl_payload) + 1];
+
+ udev = interface_to_usbdev(intf);
+
+ /* Stage 1 - Interface Parsing
+ * ---------------------------
+ *
+ * Identify the device USB interface descriptor and its
+ * endpoints. Probing is aborted on errors.
+ */
+
+ /* check if the interface is sane */
+ iface_desc = intf->cur_altsetting;
+ if (!iface_desc)
+ return -ENODEV;
+
+ dev_info(&udev->dev,
+ "%s: probing device on interface #%d\n",
+ UCAN_DRIVER_NAME,
+ iface_desc->desc.bInterfaceNumber);
+
+ /* interface sanity check */
+ if (iface_desc->desc.bNumEndpoints != 2) {
+ dev_err(&udev->dev,
+ "%s: invalid EP count (%d)",
+ UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints);
+ goto err_firmware_needs_update;
+ }
+
+ /* check interface endpoints */
+ in_ep_addr = 0;
+ out_ep_addr = 0;
+ in_ep_size = 0;
+ out_ep_size = 0;
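+ /* search the interface for one bulk IN and one bulk OUT
+ * endpoint and record their addresses and packet sizes
+ */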
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+ ep = &iface_desc->endpoint[i].desc;
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ /* In Endpoint */
+ in_ep_addr = ep->bEndpointAddress;
+ in_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+ in_ep_size = le16_to_cpu(ep->wMaxPacketSize);
+ } else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
+ 0) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ /* Out Endpoint */
+ out_ep_addr = ep->bEndpointAddress;
+ out_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
+ out_ep_size = le16_to_cpu(ep->wMaxPacketSize);
+ }
+ }
+
+ /* check if interface is sane */
+ if (!in_ep_addr || !out_ep_addr) {
+ dev_err(&udev->dev, "%s: invalid endpoint configuration\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (in_ep_size < sizeof(struct ucan_message_in)) {
+ dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (out_ep_size < sizeof(struct ucan_message_out)) {
+ dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+
+ /* Stage 2 - Device Identification
+ * -------------------------------
+ *
+ * The device interface seems to be a ucan device. Do further
+ * compatibility checks. On error probing is aborted, on
+ * success this stage leaves the ctl_msg_buffer with the
+ * reported contents of a GET_INFO command (supported
+ * bittimings, tx_fifo depth). This information is used in
+ * Stage 3 for the final driver initialisation.
+ */
+
+ /* Prepare memory for control transfers */
+ ctl_msg_buffer = devm_kzalloc(&udev->dev,
+ sizeof(union ucan_ctl_payload),
+ GFP_KERNEL);
+ if (!ctl_msg_buffer) {
+ dev_err(&udev->dev,
+ "%s: failed to allocate control pipe memory\n",
+ UCAN_DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ /* get protocol version
+ *
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
+ * because `up` is initialised in Stage 3
+ */
+ ret = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ UCAN_COMMAND_GET,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ UCAN_COMMAND_GET_PROTOCOL_VERSION,
+ iface_desc->desc.bInterfaceNumber,
+ ctl_msg_buffer,
+ sizeof(union ucan_ctl_payload),
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+
+ /* older firmware versions do not support this command - those
+ * are not supported by this driver
+ */
+ if (ret != 4) {
+ dev_err(&udev->dev,
+ "%s: could not read protocol version, ret=%d\n",
+ UCAN_DRIVER_NAME, ret);
+ if (ret >= 0)
+ ret = -EINVAL;
+ goto err_firmware_needs_update;
+ }
+
+ /* this driver currently supports protocol version 3 only */
+ protocol_version =
+ le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version);
+ if (protocol_version < UCAN_PROTOCOL_VERSION_MIN ||
+ protocol_version > UCAN_PROTOCOL_VERSION_MAX) {
+ dev_err(&udev->dev,
+ "%s: device protocol version %d is not supported\n",
+ UCAN_DRIVER_NAME, protocol_version);
+ goto err_firmware_needs_update;
+ }
+
+ /* request the device information and store it in ctl_msg_buffer
+ *
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
+ * because `up` is initialised in Stage 3
+ */
+ ret = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ UCAN_COMMAND_GET,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ UCAN_COMMAND_GET_INFO,
+ iface_desc->desc.bInterfaceNumber,
+ ctl_msg_buffer,
+ sizeof(ctl_msg_buffer->cmd_get_device_info),
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+
+ if (ret < 0) {
+ dev_err(&udev->dev, "%s: failed to retrieve device info\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) {
+ dev_err(&udev->dev, "%s: device reported invalid device info\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+ if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) {
+ dev_err(&udev->dev,
+ "%s: device reported invalid tx-fifo size\n",
+ UCAN_DRIVER_NAME);
+ goto err_firmware_needs_update;
+ }
+
+ /* Stage 3 - Driver Initialisation
+ * -------------------------------
+ *
+ * Register device to Linux, prepare private structures and
+ * reset the device.
+ */
+
+ /* allocate driver resources */
+ netdev = alloc_candev(sizeof(struct ucan_priv),
+ ctl_msg_buffer->cmd_get_device_info.tx_fifo);
+ if (!netdev) {
+ dev_err(&udev->dev,
+ "%s: cannot allocate candev\n", UCAN_DRIVER_NAME);
+ return -ENOMEM;
+ }
+
+ up = netdev_priv(netdev);
+
+ /* initialise data */
+ up->udev = udev;
+ up->intf = intf;
+ up->netdev = netdev;
+ up->intf_index = iface_desc->desc.bInterfaceNumber;
+ up->in_ep_addr = in_ep_addr;
+ up->out_ep_addr = out_ep_addr;
+ up->in_ep_size = in_ep_size;
+ up->ctl_msg_buffer = ctl_msg_buffer;
+ up->context_array = NULL;
+ up->available_tx_urbs = 0;
+
+ up->can.state = CAN_STATE_STOPPED;
+ up->can.bittiming_const = &up->device_info.bittiming_const;
+ up->can.do_set_bittiming = ucan_set_bittiming;
+ up->can.do_set_mode = &ucan_set_mode;
+ spin_lock_init(&up->context_lock);
+ spin_lock_init(&up->echo_skb_lock);
+ netdev->netdev_ops = &ucan_netdev_ops;
+
+ usb_set_intfdata(intf, up);
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ /* parse device information
+ * the data retrieved in Stage 2 is still available in
+ * up->ctl_msg_buffer
+ */
+ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
+
+ /* just print some device information - if available */
+ ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
+ sizeof(union ucan_ctl_payload));
+ if (ret > 0) {
+ /* copy string while ensuring zero termination */
+ strncpy(firmware_str, up->ctl_msg_buffer->raw,
+ sizeof(union ucan_ctl_payload));
+ firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
+ } else {
+ strcpy(firmware_str, "unknown");
+ }
+
+ /* device is compatible, reset it */
+ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
+ if (ret < 0)
+ goto err_free_candev;
+
+ init_usb_anchor(&up->rx_urbs);
+ init_usb_anchor(&up->tx_urbs);
+
+ up->can.state = CAN_STATE_STOPPED;
+
+ /* register the device */
+ ret = register_candev(netdev);
+ if (ret)
+ goto err_free_candev;
+
+ /* initialisation complete, log device info */
+ netdev_info(up->netdev, "registered device\n");
+ netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+
+ /* success */
+ return 0;
+
+err_free_candev:
+ free_candev(netdev);
+ return ret;
+
+err_firmware_needs_update:
+ dev_err(&udev->dev,
+ "%s: probe failed; try to update the device firmware\n",
+ UCAN_DRIVER_NAME);
+ return -ENODEV;
+}
+
+/* disconnect the device */
+static void ucan_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev;
+ struct ucan_priv *up = usb_get_intfdata(intf);
+
+ udev = interface_to_usbdev(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (up) {
+ unregister_netdev(up->netdev);
+ free_candev(up->netdev);
+ }
+}
+
+static const struct usb_device_id ucan_table[] = {
+ /* Mule (soldered onto compute modules) */
+ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)},
+ /* Seal (standalone USB stick) */
+ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ucan_table);
+
+/* driver callbacks */
+static struct usb_driver ucan_driver = {
+ .name = UCAN_DRIVER_NAME,
+ .probe = ucan_probe,
+ .disconnect = ucan_disconnect,
+ .id_table = ucan_table,
+};
+
+module_usb_driver(ucan_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Martin Elshuber <martin.elshuber@theobroma-systems.com>");
+MODULE_AUTHOR("Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com>");
+MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices");
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 89aec07c225f..045f0845e665 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
@@ -48,16 +51,34 @@ enum xcan_reg {
XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
- XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */
- XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
- XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
- XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
- XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
- XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
- XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
- XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
+
+ /* not on CAN FD cores */
+ XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
+ XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
+ XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
+
+ /* only on CAN FD cores */
+ XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+ XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
+ XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
+ XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
+ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
};
+#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
+#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
+#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
+#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
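+/* each HW frame slot consists of ID, DLC, DW1 and DW2 registers at
+ * consecutive 4-byte offsets from the frame base computed above
+ */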
+
+#define XCAN_CANFD_FRAME_SIZE 0x48
+#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
+ XCAN_CANFD_FRAME_SIZE * (n))
+#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
+ XCAN_CANFD_FRAME_SIZE * (n))
+
+/* the single TX mailbox used by this driver on CAN FD HW */
+#define XCAN_TX_MAILBOX_IDX 0
+
/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
@@ -67,6 +88,9 @@ enum xcan_reg {
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
+#define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
+#define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
+#define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
@@ -80,6 +104,7 @@ enum xcan_reg {
#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
+#define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
@@ -97,15 +122,15 @@ enum xcan_reg {
#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
-
-#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
- XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
- XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
- XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
+#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
+#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
+#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
+#define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
@@ -115,9 +140,31 @@ enum xcan_reg {
#define XCAN_FRAME_MAX_DATA_LEN 8
#define XCAN_TIMEOUT (1 * HZ)
+/* TX-FIFO-empty interrupt available */
+#define XCAN_FLAG_TXFEMP 0x0001
+/* RX Match Not Finished interrupt available */
+#define XCAN_FLAG_RXMNF 0x0002
+/* Extended acceptance filters with control at 0xE0 */
+#define XCAN_FLAG_EXT_FILTERS 0x0004
+/* TX mailboxes instead of TX FIFO */
+#define XCAN_FLAG_TX_MAILBOXES 0x0008
+/* RX FIFO with each buffer in separate registers at 0x1100
+ * instead of the regular FIFO at 0x50
+ */
+#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
+
+struct xcan_devtype_data {
+ unsigned int flags;
+ const struct can_bittiming_const *bittiming_const;
+ const char *bus_clk_name;
+ unsigned int btr_ts2_shift;
+ unsigned int btr_sjw_shift;
+};
+
/**
* struct xcan_priv - This definition define CAN driver instance
* @can: CAN private data structure.
+ * @tx_lock: Lock for synchronizing TX interrupt handling
* @tx_head: Tx CAN packets ready to send on the queue
* @tx_tail: Tx CAN packets successfully sended on the queue
* @tx_max: Maximum number packets the driver can send
@@ -129,9 +176,11 @@ enum xcan_reg {
* @irq_flags: For request_irq()
* @bus_clk: Pointer to struct clk
* @can_clk: Pointer to struct clk
+ * @devtype: Device type specific constants
*/
struct xcan_priv {
struct can_priv can;
+ spinlock_t tx_lock;
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -144,6 +193,7 @@ struct xcan_priv {
unsigned long irq_flags;
struct clk *bus_clk;
struct clk *can_clk;
+ struct xcan_devtype_data devtype;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -159,6 +209,18 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const xcan_bittiming_const_canfd = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -214,6 +276,23 @@ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
}
/**
+ * xcan_rx_int_mask - Get the mask for the receive interrupt
+ * @priv: Driver private data structure
+ *
+ * Return: The receive interrupt mask used by the driver on this HW
+ */
+static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
+{
+ /* RXNEMP is better suited for our use case as it cannot be cleared
+ * while the FIFO is non-empty, but CAN FD HW does not have it
+ */
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
+ return XCAN_IXR_RXOK_MASK;
+ else
+ return XCAN_IXR_RXNEMP_MASK;
+}
+
+/**
* set_reset_mode - Resets the CAN device mode
* @ndev: Pointer to net_device structure
*
@@ -238,6 +317,10 @@ static int set_reset_mode(struct net_device *ndev)
usleep_range(500, 10000);
}
+ /* reset clears FIFOs */
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
return 0;
}
@@ -273,10 +356,10 @@ static int xcan_set_bittiming(struct net_device *ndev)
btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
/* Setting Time Segment 2 in BTR Register */
- btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
+ btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
/* Setting Synchronous jump width in BTR Register */
- btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
+ btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
@@ -304,6 +387,7 @@ static int xcan_chip_start(struct net_device *ndev)
u32 reg_msr, reg_sr_mask;
int err;
unsigned long timeout;
+ u32 ier;
/* Check if it is in reset mode */
err = set_reset_mode(ndev);
@@ -315,7 +399,15 @@ static int xcan_chip_start(struct net_device *ndev)
return err;
/* Enable interrupts */
- priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
+ ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
+ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
+ XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+
+ if (priv->devtype.flags & XCAN_FLAG_RXMNF)
+ ier |= XCAN_IXR_RXMNF_MASK;
+
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
/* Check whether it is loopback mode or normal mode */
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
@@ -326,6 +418,12 @@ static int xcan_chip_start(struct net_device *ndev)
reg_sr_mask = XCAN_SR_NORMAL_MASK;
}
+ /* enable the first extended filter, if any, as cores with extended
+ * filtering default to non-receipt if all filters are disabled
+ */
+ if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
+ priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
+
priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
@@ -376,33 +474,15 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
/**
- * xcan_start_xmit - Starts the transmission
- * @skb: sk_buff pointer that contains data to be Txed
- * @ndev: Pointer to net_device structure
- *
- * This function is invoked from upper layers to initiate transmission. This
- * function uses the next available free txbuff and populates their fields to
- * start the transmission.
- *
- * Return: 0 on success and failure value on error
+ * xcan_write_frame - Write a frame to HW
+ * @priv: Driver private data structure
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @frame_offset: Register offset to write the frame to
*/
-static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
+ int frame_offset)
{
- struct xcan_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- /* Check if the TX buffer is full */
- if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
- XCAN_SR_TXFLL_MASK)) {
- netif_stop_queue(ndev);
- netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
- return NETDEV_TX_BUSY;
- }
+ struct can_frame *cf = (struct can_frame *)skb->data;
/* Watch carefully on the bit sequence */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -438,26 +518,119 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (cf->can_dlc > 4)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
- priv->tx_head++;
-
- /* Write the Frame to Xilinx CAN TX FIFO */
- priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
- /* If the CAN frame is RTR frame this write triggers tranmission */
- priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
+ priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+ /* If the CAN frame is an RTR frame this write triggers transmission
+ * (not on CAN FD)
+ */
+ priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
if (!(cf->can_id & CAN_RTR_FLAG)) {
- priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
+ priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
+ data[0]);
/* If the CAN frame is Standard/Extended frame this
- * write triggers tranmission
+ * write triggers transmission (not on CAN FD)
*/
- priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
- stats->tx_bytes += cf->can_dlc;
+ priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
+ data[1]);
}
+}
+
+/**
+ * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -ENOSPC if FIFO is full.
+ */
+static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+
+ /* Check if the TX buffer is full */
+ if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
+ XCAN_SR_TXFLL_MASK))
+ return -ENOSPC;
+
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ priv->tx_head++;
+
+ xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
+
+ /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+ if (priv->tx_max > 1)
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
/* Check if the TX buffer is full */
if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return 0;
+}
+
+/**
+ * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -ENOSPC if there is no space
+ */
+static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+
+ if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
+ BIT(XCAN_TX_MAILBOX_IDX)))
+ return -ENOSPC;
+
+ can_put_echo_skb(skb, ndev, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ priv->tx_head++;
+
+ xcan_write_frame(priv, skb,
+ XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
+
+ /* Mark buffer as ready for transmit */
+ priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
+
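+ /* with a single TX mailbox only one frame can be in flight;
+ * xcan_tx_interrupt() wakes the queue again on completion
+ */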
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return 0;
+}
+
+/**
+ * xcan_start_xmit - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from upper layers to initiate transmission.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
+ */
+static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
+ ret = xcan_start_xmit_mailbox(skb, ndev);
+ else
+ ret = xcan_start_xmit_fifo(skb, ndev);
+
+ if (ret < 0) {
+ netdev_err(ndev, "BUG!, TX full when queue awake!\n");
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
return NETDEV_TX_OK;
}
@@ -465,13 +638,14 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* xcan_rx - Is called from CAN isr to complete the received
* frame processing
* @ndev: Pointer to net_device structure
+ * @frame_base: Register offset to the frame to be read
*
* This function is invoked from the CAN isr(poll) to process the Rx frames. It
* does minimal processing and invokes "netif_receive_skb" to complete further
* processing.
* Return: 1 on success and 0 on failure.
*/
-static int xcan_rx(struct net_device *ndev)
+static int xcan_rx(struct net_device *ndev, int frame_base)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
@@ -486,9 +660,9 @@ static int xcan_rx(struct net_device *ndev)
}
/* Read a frame from Xilinx zynq CANPS */
- id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
- dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
- XCAN_DLCR_DLC_SHIFT;
+ id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
+ dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
+ XCAN_DLCR_DLC_SHIFT;
/* Change Xilinx CAN data length format to socketCAN data format */
cf->can_dlc = get_can_dlc(dlc);
@@ -511,8 +685,8 @@ static int xcan_rx(struct net_device *ndev)
}
/* DW1/DW2 must always be read to remove message from RXFIFO */
- data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
- data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+ data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
+ data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
if (!(cf->can_id & CAN_RTR_FLAG)) {
/* Change Xilinx CAN data format to socketCAN data format */
@@ -530,6 +704,103 @@ static int xcan_rx(struct net_device *ndev)
}
/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev: Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+ return CAN_STATE_ERROR_PASSIVE;
+ else if (status & XCAN_SR_ERRWRN_MASK)
+ return CAN_STATE_ERROR_WARNING;
+ else
+ return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev: Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf: Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+ enum can_state new_state,
+ struct can_frame *cf)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+ u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+ u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+ enum can_state tx_state = txerr >= rxerr ? new_state : 0;
+ enum can_state rx_state = txerr <= rxerr ? new_state : 0;
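+ /* attribute the new state to the side(s) with the higher error
+ * counter; can_change_state() reports the worse of the two
+ */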
+
+ /* non-ERROR states are handled elsewhere */
+ if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
+ return;
+
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (cf) {
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev: Pointer to net_device structure
+ *
+ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ enum can_state old_state = priv->can.state;
+ enum can_state new_state;
+
+ /* changing error state due to successful frame RX/TX can only
+ * occur from these states
+ */
+ if (old_state != CAN_STATE_ERROR_WARNING &&
+ old_state != CAN_STATE_ERROR_PASSIVE)
+ return;
+
+ new_state = xcan_current_error_state(ndev);
+
+ if (new_state != old_state) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+ if (skb) {
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+ }
+}
+
+/**
* xcan_err_interrupt - error frame Isr
* @ndev: net_device pointer
* @isr: interrupt status register value
@@ -544,16 +815,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- u32 err_status, status, txerr = 0, rxerr = 0;
+ u32 err_status;
skb = alloc_can_err_skb(ndev, &cf);
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
- txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
- rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
- XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +830,11 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
- } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (rxerr > 127) ?
- CAN_ERR_CRTL_RX_PASSIVE :
- CAN_ERR_CRTL_TX_PASSIVE;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
+ } else {
+ enum can_state new_state = xcan_current_error_state(ndev);
+
+ if (new_state != priv->can.state)
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}
/* Check for Arbitration lost interrupt */
@@ -600,13 +850,23 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
}
}
+ /* Check for RX Match Not Finished interrupt */
+ if (isr & XCAN_IXR_RXMNF_MASK) {
+ stats->rx_dropped++;
+ stats->rx_errors++;
+ netdev_err(ndev, "RX match not finished, frame discarded\n");
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_UNSPEC;
+ }
+ }
+
/* Check for error interrupt */
if (isr & XCAN_IXR_ERROR_MASK) {
if (skb)
@@ -691,6 +951,44 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
}
/**
+ * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
+ * @priv: Driver private data structure
+ *
+ * Return: Register offset of the next frame in the RX FIFO, or -ENOENT
+ * if the FIFO is empty.
+ */
+static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
+{
+ int offset;
+
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
+ u32 fsr;
+
+ /* clear RXOK before the is-empty check so that any newly
+ * received frame will reassert it without a race
+ */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
+
+ fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
+
+ /* check if RX FIFO is empty */
+ if (!(fsr & XCAN_FSR_FL_MASK))
+ return -ENOENT;
+
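+ /* the FSR read index selects the per-buffer register set
+ * that holds the oldest frame
+ */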
+ offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+
+ } else {
+ /* check if RX FIFO is empty */
+ if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
+ XCAN_IXR_RXNEMP_MASK))
+ return -ENOENT;
+
+ /* frames are read from a static offset */
+ offset = XCAN_RXFIFO_OFFSET;
+ }
+
+ return offset;
+}
+
+/**
* xcan_rx_poll - Poll routine for rx packets (NAPI)
* @napi: napi structure pointer
* @quota: Max number of rx packets to be processed.
@@ -704,31 +1002,35 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
struct net_device *ndev = napi->dev;
struct xcan_priv *priv = netdev_priv(ndev);
- u32 isr, ier;
+ u32 ier;
int work_done = 0;
-
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
- while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
- if (isr & XCAN_IXR_RXOK_MASK) {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXOK_MASK);
- work_done += xcan_rx(ndev);
- } else {
+ int frame_offset;
+
+ while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
+ (work_done < quota)) {
+ work_done += xcan_rx(ndev, frame_offset);
+
+ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
+ /* increment read index */
+ priv->write_reg(priv, XCAN_FSR_OFFSET,
+ XCAN_FSR_IRI_MASK);
+ else
+ /* clear rx-not-empty (the bit can actually be
+ * cleared only once the FIFO is empty)
+ */
priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXNEMP_MASK);
- break;
- }
- priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ XCAN_IXR_RXNEMP_MASK);
}
- if (work_done)
+ if (work_done) {
can_led_event(ndev, CAN_LED_EVENT_RX);
+ xcan_update_error_state_after_rxtx(ndev);
+ }
if (work_done < quota) {
napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ ier |= xcan_rx_int_mask(priv);
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
@@ -743,18 +1045,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ unsigned int frames_in_fifo;
+ int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+ unsigned long flags;
+ int retries = 0;
+
+ /* Synchronize with xmit as we need to know the exact number
+ * of frames in the FIFO to stay in sync due to the TXFEMP
+ * handling.
+ * This also prevents a race between netif_wake_queue() and
+ * netif_stop_queue().
+ */
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ frames_in_fifo = priv->tx_head - priv->tx_tail;
- while ((priv->tx_head - priv->tx_tail > 0) &&
- (isr & XCAN_IXR_TXOK_MASK)) {
+ if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+ /* clear TXOK anyway to avoid getting back here */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
- can_get_echo_skb(ndev, priv->tx_tail %
- priv->tx_max);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return;
+ }
+
+ /* Check if 2 frames were sent (TXOK only means that at least 1
+ * frame was sent).
+ */
+ if (frames_in_fifo > 1) {
+ WARN_ON(frames_in_fifo > priv->tx_max);
+
+ /* Synchronize TXOK and isr so that after the loop:
+ * (1) isr variable is up-to-date at least up to TXOK clear
+ * time. This avoids us clearing a TXOK of a second frame
+ * but not noticing that the FIFO is now empty and thus
+ * marking only a single frame as sent.
+ * (2) No TXOK is left. Having one could mean leaving a
+ * stray TXOK as we might process the associated frame
+ * via TXFEMP handling as we read TXFEMP *after* TXOK
+ * clear to satisfy (1).
+ */
+ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+
+ if (isr & XCAN_IXR_TXFEMP_MASK) {
+ /* nothing in FIFO anymore */
+ frames_sent = frames_in_fifo;
+ }
+ } else {
+ /* single frame in fifo, just clear TXOK */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ }
+
+ while (frames_sent--) {
+ stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
+ priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
- can_led_event(ndev, CAN_LED_EVENT_TX);
+
netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ xcan_update_error_state_after_rxtx(ndev);
}
/**
@@ -773,6 +1128,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
+ u32 isr_errors;
+ u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +1148,18 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
xcan_tx_interrupt(ndev, isr);
/* Check for the type of error interrupt and Processing it */
- if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
- XCAN_IXR_ARBLST_MASK));
+ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK);
+ if (isr_errors) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
}
/* Check for the type of receive interrupt and Processing it */
- if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+ if (isr & rx_int_mask) {
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ ier &= ~rx_int_mask;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
napi_schedule(&priv->napi);
}
@@ -819,13 +1176,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
static void xcan_chip_stop(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 ier;
/* Disable interrupts and leave the can in configuration mode */
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~XCAN_INTR_ALL;
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ set_reset_mode(ndev);
priv->can.state = CAN_STATE_STOPPED;
}
@@ -958,10 +1311,15 @@ static const struct net_device_ops xcan_netdev_ops = {
*/
static int __maybe_unused xcan_suspend(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_suspend(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
- return 0;
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ xcan_chip_stop(ndev);
+ }
+
+ return pm_runtime_force_suspend(dev);
}
/**
@@ -973,11 +1331,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
*/
static int __maybe_unused xcan_resume(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_resume(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret;
- return 0;
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ ret = xcan_chip_start(ndev);
+ if (ret) {
+ dev_err(dev, "xcan_chip_start failed on resume\n");
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+ return 0;
}
/**
@@ -992,14 +1366,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
-
- priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
- priv->can.state = CAN_STATE_SLEEPING;
-
clk_disable_unprepare(priv->bus_clk);
clk_disable_unprepare(priv->can_clk);
@@ -1018,7 +1384,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- u32 isr, status;
ret = clk_prepare_enable(priv->bus_clk);
if (ret) {
@@ -1032,27 +1397,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
return ret;
}
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
- if (netif_running(ndev)) {
- if (isr & XCAN_IXR_BSOFF_MASK) {
- priv->can.state = CAN_STATE_BUS_OFF;
- priv->write_reg(priv, XCAN_SRR_OFFSET,
- XCAN_SRR_RESET_MASK);
- } else if ((status & XCAN_SR_ESTAT_MASK) ==
- XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- } else {
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
-
return 0;
}
@@ -1061,6 +1405,40 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
+static const struct xcan_devtype_data xcan_zynq_data = {
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
+ .bus_clk_name = "pclk",
+};
+
+static const struct xcan_devtype_data xcan_axi_data = {
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
+ .bus_clk_name = "s_axi_aclk",
+};
+
+static const struct xcan_devtype_data xcan_canfd_data = {
+ .flags = XCAN_FLAG_EXT_FILTERS |
+ XCAN_FLAG_RXMNF |
+ XCAN_FLAG_TX_MAILBOXES |
+ XCAN_FLAG_RX_FIFO_MULTI,
+ .bittiming_const = &xcan_bittiming_const,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
+ .bus_clk_name = "s_axi_aclk",
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+ { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+ { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
+ { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
/**
* xcan_probe - Platform registration call
* @pdev: Handle to the platform device structure
@@ -1075,8 +1453,13 @@ static int xcan_probe(struct platform_device *pdev)
struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
+ const struct of_device_id *of_id;
+ const struct xcan_devtype_data *devtype = &xcan_axi_data;
void __iomem *addr;
- int ret, rx_max, tx_max;
+ int ret;
+ int rx_max, tx_max;
+ int hw_tx_max, hw_rx_max;
+ const char *hw_tx_max_property;
/* Get the virtual base address for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,13 +1469,54 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
- if (ret < 0)
+ of_id = of_match_device(xcan_of_match, &pdev->dev);
+ if (of_id && of_id->data)
+ devtype = of_id->data;
+
+ hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
+ "tx-mailbox-count" : "tx-fifo-depth";
+
+ ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
+ &hw_tx_max);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing %s property\n",
+ hw_tx_max_property);
goto err;
+ }
- ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
- if (ret < 0)
+ ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
+ &hw_rx_max);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "missing rx-fifo-depth property (mailbox mode is not supported)\n");
goto err;
+ }
+
+ /* With TX FIFO:
+ *
+ * There is no way to directly figure out how many frames have been
+ * sent when the TXOK interrupt is processed. If TXFEMP
+ * is supported, we can have 2 frames in the FIFO and use TXFEMP
+ * to determine if 1 or 2 frames have been sent.
+ * Theoretically we should be able to use TXFWMEMP to determine up
+ * to 3 frames, but it seems that after putting a second frame in the
+ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+ * sent), which is not a sensible state - possibly TXFWMEMP is not
+ * completely synchronized with the rest of the bits?
+ *
+ * With TX mailboxes:
+ *
+ * HW sends frames in CAN ID priority order. To preserve FIFO ordering
+ * we submit frames one at a time.
+ */
+ if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (devtype->flags & XCAN_FLAG_TXFEMP))
+ tx_max = min(hw_tx_max, 2);
+ else
+ tx_max = 1;
+
+ rx_max = hw_rx_max;
/* Create a CAN device instance */
ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
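
A worked reading of the tx_max selection above (values illustrative). Note that none of the three devtype tables in this patch set XCAN_FLAG_TXFEMP, so the min() arm is prospective until such a devtype is added:

	/* TXFEMP-capable FIFO core, tx-fifo-depth = <16>: */
	hw_tx_max = 16;
	tx_max = min(hw_tx_max, 2);	/* 2: TXFEMP can tell 1 from 2 sent */

	/* CAN FD core in mailbox mode, tx-mailbox-count = <32>: */
	hw_tx_max = 32;
	tx_max = 1;			/* serialize to keep FIFO order */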
@@ -1101,13 +1525,15 @@ static int xcan_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->dev = &pdev->dev;
- priv->can.bittiming_const = &xcan_bittiming_const;
+ priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
priv->reg_base = addr;
priv->tx_max = tx_max;
+ priv->devtype = *devtype;
+ spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
ndev->irq = platform_get_irq(pdev, 0);
@@ -1124,22 +1550,12 @@ static int xcan_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->can_clk);
goto err_free;
}
- /* Check for type of CAN device */
- if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,zynq-can-1.0")) {
- priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
- ret = PTR_ERR(priv->bus_clk);
- goto err_free;
- }
- } else {
- priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
- ret = PTR_ERR(priv->bus_clk);
- goto err_free;
- }
+
+ priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
+ if (IS_ERR(priv->bus_clk)) {
+ dev_err(&pdev->dev, "bus clock not found\n");
+ ret = PTR_ERR(priv->bus_clk);
+ goto err_free;
}
priv->write_reg = xcan_write_reg_le;
@@ -1172,9 +1588,9 @@ static int xcan_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
- priv->reg_base, ndev->irq, priv->can.clock.freq,
- priv->tx_max);
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+ hw_tx_max, priv->tx_max);
return 0;
@@ -1208,14 +1624,6 @@ static int xcan_remove(struct platform_device *pdev)
return 0;
}
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
- { .compatible = "xlnx,zynq-can-1.0", },
- { .compatible = "xlnx,axi-can-1.00.a", },
- { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
.remove = xcan_remove,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index ac96ff40d37e..e0066adcd2f3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -166,6 +166,11 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+ /* Enable learning */
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg &= ~BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
+
/* Enable Broadcom tags for that port if requested */
if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
@@ -222,8 +227,13 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg;
- if (priv->wol_ports_mask & (1 << port))
+ /* Disable learning while in WoL mode */
+ if (priv->wol_ports_mask & (1 << port)) {
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
return;
+ }
if (port == priv->moca_port)
bcm_sf2_port_intr_disable(priv, port);
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index b89acaee12d4..1e37b65aab93 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -755,7 +755,8 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;
if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
- !dsa_is_user_port(ds, port_num) ||
+ !(dsa_is_user_port(ds, port_num) ||
+ dsa_is_cpu_port(ds, port_num)) ||
port_num >= priv->hw_params.num_ports)
return -EINVAL;
/*
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 3ccd5a865dcb..0a1e530d52b7 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -168,6 +168,8 @@ enum bcm_sf2_reg_offs {
#define CORE_SWITCH_CTRL 0x00088
#define MII_DUMB_FWDG_EN (1 << 6)
+#define CORE_DIS_LEARN 0x000f0
+
#define CORE_SFT_LRN_CTRL 0x000f8
#define SW_LEARN_CNTL(x) (1 << (x))
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 9dd270978064..0b5a2c31f395 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+/* To be called with reg_lock held */
static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
{
int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g1_irq_free_common(chip);
-
+ /* free_irq must be called without reg_lock taken because the irq
+ * handler takes this lock, too.
+ */
free_irq(chip->irq, chip);
+
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g1_irq_free_common(chip);
-
kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
kthread_destroy_worker(chip->kworker);
+
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mutex_unlock(&chip->reg_lock);
}
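
The lock-ordering hazard that both reorderings above remove, sketched as an interleaving (the handler name is taken from elsewhere in chip.c; the scenario itself is reconstructed, not quoted):

/*
 *   CPU0: old mv88e6xxx_g1_irq_free        CPU1: threaded IRQ handler
 *   (called with reg_lock held)
 *
 *   mutex_lock(&chip->reg_lock);
 *                                          mv88e6xxx_g1_irq_thread_fn()
 *                                            mutex_lock(&chip->reg_lock);
 *                                            ... blocks on CPU0 ...
 *   free_irq(chip->irq, chip);
 *     waits for the running handler to
 *     return: neither side can progress.
 */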
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -2608,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6341_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -2774,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
};
@@ -2955,7 +2964,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6341_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -3337,6 +3345,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4523,12 +4532,10 @@ out_g2_irq:
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
out_g1_irq:
- mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
- mutex_unlock(&chip->reg_lock);
out:
if (pdata)
dev_put(pdata->netdev);
@@ -4556,12 +4563,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
- mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
- mutex_unlock(&chip->reg_lock);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf020..5c3ef9fc8207 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !PPC32
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y here.
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index af766fd61151..6fde68aa13a4 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -81,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -128,6 +127,7 @@ config FEALNX
cards. <http://www.myson.com.tw/>
source "drivers/net/ethernet/natsemi/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 22555e7fa752..b45d5f626b59 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
-obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
@@ -60,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
+obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_NETX) += netx-eth.o
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 3872ab96b80a..097467f44b0d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -802,7 +802,7 @@ static int starfire_init_one(struct pci_dev *pdev,
int mii_status;
for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
- mdelay(100);
+ msleep(100);
boguscnt = 1000;
while (--boguscnt > 0)
if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1b9d3130af4d..17f12c18d225 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+ io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
sizeof(struct ena_eth_io_tx_desc) :
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index f273af136fc7..9e5cf5583c87 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index be198cc0b10c..f5ad12c10934 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2036,22 +2036,22 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
}
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_dma_addr)
return -ENOMEM;
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_dma_addr)
return -ENOMEM;
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->tx_skbuff)
return -ENOMEM;
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!lp->rx_skbuff)
return -ENOMEM;
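
The rationale for moving off GFP_ATOMIC, based on the calling context visible in the function itself:

/* pcnet32_alloc_ring() runs from the open() and ring-resize paths,
 * i.e. process context where sleeping is allowed, so GFP_KERNEL is
 * safe here and far less likely to fail spuriously than GFP_ATOMIC
 * for these four bookkeeping allocations.
 */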
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index cc1e4f820e64..533094233659 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
struct page *pages = NULL;
dma_addr_t pages_dma;
gfp_t gfp;
- int order, ret;
+ int order;
again:
order = alloc_order;
@@ -316,10 +316,9 @@ again:
/* Map the pages */
pages_dma = dma_map_page(pdata->dev, pages, 0,
PAGE_SIZE << order, DMA_FROM_DEVICE);
- ret = dma_mapping_error(pdata->dev, pages_dma);
- if (ret) {
+ if (dma_mapping_error(pdata->dev, pages_dma)) {
put_page(pages);
- return ret;
+ return -ENOMEM;
}
pa->pages = pages;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4b5d625de8f0..8a3a60bb2688 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
if (pdata->tx_pause != pdata->phy.tx_pause) {
new_state = 1;
- pdata->hw_if.config_tx_flow_control(pdata);
pdata->tx_pause = pdata->phy.tx_pause;
+ pdata->hw_if.config_tx_flow_control(pdata);
}
if (pdata->rx_pause != pdata->phy.rx_pause) {
new_state = 1;
- pdata->hw_if.config_rx_flow_control(pdata);
pdata->rx_pause = pdata->phy.rx_pause;
+ pdata->hw_if.config_rx_flow_control(pdata);
}
/* Speed support */
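
The ordering point behind both swaps:

/* config_tx_flow_control()/config_rx_flow_control() are assumed to
 * program the hardware from the cached pdata->tx_pause/rx_pause, so
 * the cache must be updated before the callback runs; the old order
 * re-applied the stale value that triggered the change.
 */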
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 94270f654b3b..7087b88550db 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
skb = build_skb(page_address(page) + adapter->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
adapter->rx_page_offset += adapter->rx_frag_size;
if (adapter->rx_page_offset >= PAGE_SIZE)
adapter->rx_page = NULL;
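
A sketch of the assumed rationale for the one-line reserve:

/* build_skb() leaves skb->data at the fragment base with zero
 * headroom. Reserving NET_SKB_PAD up front, before the buffer is
 * handed to the NIC, avoids zero-headroom skbs, which otherwise
 * force expensive reallocations (or outright drops) once the stack
 * needs to prepend headers.
 */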
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index b7aa8ad96dfb..c1d3ee9baf7e 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -230,4 +230,12 @@ config BNXT_DCB
If unsure, say N.
+config BNXT_HWMON
+ bool "Broadcom NetXtreme-C/E HWMON support"
+ default y
+ depends on BNXT && HWMON && !(BNXT=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
+ devices via the hwmon sysfs interface.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 631617d95769..284581c9680e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1041,17 +1041,25 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
u32 reg;
+ reg = umac_readl(priv, UMAC_MPD_CTRL);
+ if (enable)
+ reg |= MPD_EN;
+ else
+ reg &= ~MPD_EN;
+ umac_writel(priv, reg, UMAC_MPD_CTRL);
+}
+
+static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+{
/* Stop monitoring MPD interrupt */
intrl2_0_mask_set(priv, INTRL2_0_MPD);
/* Clear the MagicPacket detection logic */
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1102,10 +1110,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD) {
+ if (priv->irq0_stat & INTRL2_0_MPD)
netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
- bcm_sysport_resume_from_wol(priv);
- }
if (!priv->is_lite)
goto out;
@@ -2449,9 +2455,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* Do not leave the UniMAC RBUF matching only MPD packets */
if (!timeout) {
- reg = umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
- umac_writel(priv, reg, UMAC_MPD_CTRL);
+ mpd_enable_set(priv, false);
netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index da18aa239acb..a4a90b6cdb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
}
return 0;
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, false);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_config_rss_eth(bp, false);
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c612d74451a7..d7f51ab85b45 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -51,6 +51,8 @@
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -1115,7 +1117,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3)
+ if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -3445,7 +3447,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
memcpy(short_cmd_req, req, msg_len);
@@ -3638,7 +3640,9 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -3676,7 +3680,15 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ else if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
@@ -3981,6 +3993,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
max_rings = bp->rx_nr_rings - 1;
@@ -4578,7 +4591,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
}
hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
u16 cp, stats;
hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
@@ -4624,7 +4637,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->fid = cpu_to_le16(0xffff);
enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
req->num_tx_rings = cpu_to_le16(tx_rings);
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
@@ -4697,7 +4710,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
struct hwrm_func_vf_cfg_input req = {0};
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ if (!BNXT_NEW_RM(bp)) {
bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
}
@@ -4757,7 +4770,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
return true;
@@ -4793,7 +4806,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
tx = hw_resc->resv_tx_rings;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
rx = hw_resc->resv_rx_rings;
cp = hw_resc->resv_cp_rings;
grp = hw_resc->resv_hw_ring_grps;
@@ -4837,7 +4850,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
u32 flags;
int rc;
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return 0;
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
@@ -4866,7 +4879,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
@@ -5088,9 +5101,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
- bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
+ bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
@@ -5162,7 +5175,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
pf->vf_resv_strategy =
le16_to_cpu(resp->vf_reservation_strategy);
- if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
}
hwrm_func_resc_qcaps_exit:
@@ -5248,7 +5261,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code >= 0x10803) {
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
- bp->flags |= BNXT_FLAG_NEW_RM;
+ bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
}
return 0;
}
@@ -5268,7 +5281,8 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int rc = 0;
struct hwrm_queue_qportcfg_input req = {0};
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
- u8 i, *qptr;
+ u8 i, j, *qptr;
+ bool no_rdma;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
@@ -5286,19 +5300,24 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bp->max_tc; i++) {
+ bp->q_info[j].queue_id = *qptr++;
+ bp->q_info[j].queue_profile = *qptr++;
+ bp->tc_to_qidx[j] = j;
+ if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
+ (no_rdma && BNXT_PF(bp)))
+ j++;
+ }
+ bp->max_tc = max_t(u8, j, 1);
+
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
bp->max_tc = 1;
if (bp->max_lltc > bp->max_tc)
bp->max_lltc = bp->max_tc;
- qptr = &resp->queue_id0;
- for (i = 0; i < bp->max_tc; i++) {
- bp->q_info[i].queue_id = *qptr++;
- bp->q_info[i].queue_profile = *qptr++;
- bp->tc_to_qidx[i] = i;
- }
-
qportcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5351,7 +5370,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
- bp->flags |= BNXT_FLAG_SHORT_CMD;
+ bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5920,7 +5939,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
- if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+ if (!BNXT_NEW_RM(bp) || avail_msix >= num)
return avail_msix;
if (max_irq < total_req) {
@@ -5933,7 +5952,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
static int bnxt_get_num_msix(struct bnxt *bp)
{
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return bnxt_get_max_func_irqs(bp);
return bnxt_cp_rings_in_use(bp);
@@ -6056,8 +6075,7 @@ int bnxt_reserve_rings(struct bnxt *bp)
netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
return rc;
}
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
- (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
@@ -6337,6 +6355,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
+ if (bp->test_info)
+ bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -6633,6 +6655,39 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+ struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_drv_if_change_input req = {0};
+ bool resc_reinit = false;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
+ if (up)
+ req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && (resp->flags &
+ cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
+ resc_reinit = true;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (up && resc_reinit && BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ }
+ return rc;
+}
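
The assumed call pattern for the new handshake, matching the bnxt_open()/bnxt_close() hunks later in this patch:

/*
 *   bnxt_open():
 *       bnxt_hwrm_if_change(bp, true);      // driver coming up
 *       rc = __bnxt_open_nic(bp, true, true);
 *       if (rc)
 *               bnxt_hwrm_if_change(bp, false);
 *
 *   bnxt_close():
 *       ...
 *       bnxt_hwrm_if_change(bp, false);     // driver going down
 *
 * On "up", a RESC_CHANGE reply means firmware reset the resource
 * state while the driver was down, so the cached reservations are
 * zeroed and re-queried via bnxt_hwrm_func_resc_qcaps().
 */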
+
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -6742,6 +6797,62 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
} while (handle && handle != 0xffff);
}
+#ifdef CONFIG_BNXT_HWMON
+static ssize_t bnxt_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct hwrm_temp_monitor_query_input req = {0};
+ struct hwrm_temp_monitor_query_output *resp;
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u32 temp = 0;
+
+ resp = bp->hwrm_cmd_resp_addr;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ temp = resp->temp * 1000; /* display millidegree */
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return sprintf(buf, "%u\n", temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+
+static struct attribute *bnxt_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bnxt);
+
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+ if (bp->hwmon_dev) {
+ hwmon_device_unregister(bp->hwmon_dev);
+ bp->hwmon_dev = NULL;
+ }
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+ DRV_MODULE_NAME, bp,
+ bnxt_groups);
+ if (IS_ERR(bp->hwmon_dev)) {
+ bp->hwmon_dev = NULL;
+ dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+ }
+}
+#else
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+}
+#endif
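
Once registration succeeds, the sensor appears in the standard hwmon sysfs layout (paths illustrative; the index is assigned by the hwmon core):

/*
 *   /sys/class/hwmon/hwmonX/name         -> "bnxt_en"
 *   /sys/class/hwmon/hwmonX/temp1_input  -> e.g. "55000"
 *                                           (millidegrees, i.e. 55 degC)
 */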
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -6894,8 +7005,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
mutex_unlock(&bp->link_lock);
- if (rc)
+ if (rc) {
netdev_warn(bp->dev, "failed to update phy settings\n");
+ if (BNXT_SINGLE_PF(bp)) {
+ bp->link_info.phy_retry = true;
+ bp->link_info.phy_retry_expires =
+ jiffies + 5 * HZ;
+ }
+ }
}
if (irq_re_init)
@@ -6981,8 +7098,16 @@ void bnxt_half_close_nic(struct bnxt *bp)
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ bnxt_hwrm_if_change(bp, true);
+ rc = __bnxt_open_nic(bp, true, true);
+ if (rc)
+ bnxt_hwrm_if_change(bp, false);
+
+ bnxt_hwmon_open(bp);
- return __bnxt_open_nic(bp, true, true);
+ return rc;
}
static bool bnxt_drv_busy(struct bnxt *bp)
@@ -7044,8 +7169,10 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
+ bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
+ bnxt_hwrm_if_change(bp, false);
return 0;
}
@@ -7295,7 +7422,7 @@ skip_uc:
static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
- if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
+ if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
/* No minimum rings were provisioned by the PF. Don't
@@ -7345,7 +7472,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
return false;
}
- if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ if (!BNXT_NEW_RM(bp))
return true;
if (vnics == bp->hw_resc.resv_vnics)
@@ -7579,6 +7706,16 @@ static void bnxt_timer(struct timer_list *t)
set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
+
+ if (bp->link_info.phy_retry) {
+ if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
+ bp->link_info.phy_retry = 0;
+ netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
+ } else {
+ set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+ }
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -7666,6 +7803,19 @@ static void bnxt_sp_task(struct work_struct *work)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
}
+ if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
+ int rc;
+
+ mutex_lock(&bp->link_lock);
+ rc = bnxt_update_phy_setting(bp);
+ mutex_unlock(&bp->link_lock);
+ if (rc) {
+ netdev_warn(bp->dev, "update phy settings retry failed\n");
+ } else {
+ bp->link_info.phy_retry = false;
+ netdev_info(bp->dev, "update phy settings retry succeeded\n");
+ }
+ }
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
mutex_lock(&bp->link_lock);
bnxt_get_port_module_status(bp);
@@ -7718,7 +7868,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
cp += bnxt_get_ulp_msix_num(bp);
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
vnics);
@@ -8727,7 +8877,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 934aa11c82eb..fefa011320e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.1"
+#define DRV_MODULE_VERSION "1.9.2"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 9
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -326,6 +326,10 @@ struct rx_tpa_start_cmp_ext {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+#define TPA_START_IS_IPV6(rx_tpa_start) \
+ (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -862,6 +866,7 @@ struct bnxt_pf_info {
u8 vf_resv_strategy;
#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
+#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC 2
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -959,6 +964,9 @@ struct bnxt_link_info {
u16 advertising; /* user adv setting */
bool force_link_chng;
+ bool phy_retry;
+ unsigned long phy_retry_expires;
+
/* a copy of phy_qcfg output used to report link
* info to VF
*/
@@ -990,6 +998,8 @@ struct bnxt_led_info {
struct bnxt_test_info {
u8 offline_mask;
+ u8 flags;
+#define BNXT_TEST_FL_EXT_LPBK 0x1
u16 timeout;
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
@@ -1134,7 +1144,6 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_DCB_ENABLED 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1163,15 +1172,11 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
- #define BNXT_FLAG_SHORT_CMD 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
- #define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
- #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1276,10 +1281,19 @@ struct bnxt {
struct ieee_ets *ieee_ets;
u8 dcbx_cap;
u8 default_pri;
+ u8 max_dscp_value;
#endif /* CONFIG_BNXT_DCB */
u32 msg_enable;
+ u32 fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD 0x00000001
+ #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
+ #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
+ #define BNXT_FW_CAP_NEW_RM 0x00000008
+ #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+
+#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
@@ -1342,6 +1356,7 @@ struct bnxt {
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+#define BNXT_UPDATE_PHY_SP_EVENT 16
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1397,6 +1412,7 @@ struct bnxt {
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
struct dentry *debugfs_dim;
+ struct device *hwmon_dev;
};
#define BNXT_RX_STATS_OFFSET(counter) \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
new file mode 100644
index 000000000000..09c22f8fe399
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -0,0 +1,66 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2018 Broadcom Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_COREDUMP_H
+#define BNXT_COREDUMP_H
+
+struct bnxt_coredump_segment_hdr {
+ __u8 signature[4];
+ __le32 component_id;
+ __le32 segment_id;
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __le16 function_id;
+ __le32 offset;
+ __le32 length;
+ __le32 status;
+ __le32 duration;
+ __le32 data_offset;
+ __le32 instance;
+ __le32 rsvd[5];
+};
+
+struct bnxt_coredump_record {
+ __u8 signature[4];
+ __le32 flags;
+ __u8 low_version;
+ __u8 high_version;
+ __u8 asic_state;
+ __u8 rsvd0[5];
+ char system_name[32];
+ __le16 year;
+ __le16 month;
+ __le16 day;
+ __le16 hour;
+ __le16 minute;
+ __le16 second;
+ __le16 utc_bias;
+ __le16 rsvd1;
+ char commandline[256];
+ __le32 total_segments;
+ __le32 os_ver_major;
+ __le32 os_ver_minor;
+ __le32 rsvd2;
+ char os_name[32];
+ __le16 end_year;
+ __le16 end_month;
+ __le16 end_day;
+ __le16 end_hour;
+ __le16 end_minute;
+ __le16 end_second;
+ __le16 end_utc_bias;
+ __le32 asic_id1;
+ __le32 asic_id2;
+ __le32 coredump_status;
+ __u8 ioctl_low_version;
+ __u8 ioctl_high_version;
+ __le16 rsvd3[313];
+};
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index d5bc72cecde3..ddc98c359488 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -385,6 +385,61 @@ set_app_exit:
return rc;
}
+static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_dscp_qcaps_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
+ if (bp->max_dscp_value < 0x3f)
+ bp->max_dscp_value = 0;
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
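
Worked arithmetic for the capability check above:

/* num_dscp_bits == 6: max_dscp_value = (1 << 6) - 1 = 63 = 0x3f,
 * which passes the < 0x3f test and is kept, permitting DSCP values
 * 0..63. Anything narrower (e.g. 4 bits -> 15) is zeroed, and
 * bnxt_dcbnl_ieee_dscp_app_prep() below then rejects DSCP apps
 * outright.
 */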
+
+static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
+ bool add)
+{
+ struct hwrm_queue_dscp2pri_cfg_input req = {0};
+ struct bnxt_dscp2pri_entry *dscp2pri;
+ dma_addr_t mapping;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
+ dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
+ &mapping, GFP_KERNEL);
+ if (!dscp2pri)
+ return -ENOMEM;
+
+ req.src_data_addr = cpu_to_le64(mapping);
+ dscp2pri->dscp = app->protocol;
+ if (add)
+ dscp2pri->mask = 0x3f;
+ else
+ dscp2pri->mask = 0;
+ dscp2pri->pri = app->priority;
+ req.entry_cnt = cpu_to_le16(1);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
+ mapping);
+ return rc;
+}
+
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
@@ -551,15 +606,30 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
return rc;
}
+static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
+{
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
+ if (!bp->max_dscp_value)
+ return -ENOTSUPP;
+ if (app->protocol > bp->max_dscp_value)
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = -EINVAL;
+ int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_setapp(dev, app);
if (rc)
return rc;
@@ -570,6 +640,9 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);
+
return rc;
}
@@ -582,6 +655,10 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
!(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
+ rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+ if (rc)
+ return rc;
+
rc = dcb_ieee_delapp(dev, app);
if (rc)
return rc;
@@ -591,6 +668,9 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
app->protocol == ROCE_V2_UDP_DPORT))
rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);
+
return rc;
}
@@ -610,7 +690,7 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
if (mode & DCB_CAP_DCBX_HOST) {
- if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
return 1;
/* only support IEEE */
@@ -642,10 +722,11 @@ void bnxt_dcb_init(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10501)
return;
+ bnxt_hwrm_queue_dscp_qcaps(bp);
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
- if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+ if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
- else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
+ else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
bp->dev->dcbnl_ops = &dcbnl_ops;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 69efde785f23..6eed231de565 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -33,10 +33,20 @@ struct bnxt_cos2bw_cfg {
u8 unused;
};
+struct bnxt_dscp2pri_entry {
+ u8 dscp;
+ u8 mask;
+ u8 pri;
+};
+
#define BNXT_LLQ(q_profile) \
((q_profile) == \
QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
+#define BNXT_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
void bnxt_dcb_init(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 7bd96ab4f7c5..f3b9fbcc705b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -29,7 +29,7 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
{
- struct hwrm_nvm_variable_input *req = msg;
+ struct hwrm_nvm_get_variable_input *req = msg;
void *data_addr = NULL, *buf = NULL;
struct bnxt_dl_nvm_param nvm_param;
int bytesize, idx = 0, rc, i;
@@ -60,18 +60,18 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
if (!data_addr)
return -ENOMEM;
- req->data_addr = cpu_to_le64(data_dma_addr);
+ req->dest_data_addr = cpu_to_le64(data_dma_addr);
req->data_len = cpu_to_le16(nvm_param.num_bits);
req->option_num = cpu_to_le16(nvm_param.offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
- if (req->req_type == HWRM_NVM_SET_VARIABLE)
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
memcpy(data_addr, buf, bytesize);
rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
- if (!rc && req->req_type == HWRM_NVM_GET_VARIABLE)
+ if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7270c8b0cef3..b6dbc3f6d309 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -16,12 +16,15 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
+#include "bnxt_coredump.h"
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
@@ -112,6 +115,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
BNXT_MAX_STATS_COAL_TICKS);
stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
bp->stats_coal_ticks = stats_ticks;
+ if (bp->stats_coal_ticks)
+ bp->current_interval =
+ bp->stats_coal_ticks * HZ / 1000000;
+ else
+ bp->current_interval = BNXT_TIMER_INTERVAL;
update_stats = true;
}
@@ -162,7 +170,7 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
- BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
+ BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
@@ -205,9 +213,9 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
- BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
+ BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
- BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
+ BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
@@ -463,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
@@ -2392,7 +2400,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
struct hwrm_port_phy_cfg_input req = {0};
@@ -2400,7 +2408,10 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
if (enable) {
bnxt_disable_an_for_lpbk(bp, &req);
- req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ if (ext)
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
+ else
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
} else {
req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
}
@@ -2533,15 +2544,17 @@ static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
return rc;
}
-#define BNXT_DRV_TESTS 3
+#define BNXT_DRV_TESTS 4
#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
-#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
u64 *buf)
{
struct bnxt *bp = netdev_priv(dev);
+ bool do_ext_lpbk = false;
bool offline = false;
u8 test_results = 0;
u8 test_mask = 0;
@@ -2555,6 +2568,10 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
+ if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
+ (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+ do_ext_lpbk = true;
+
if (etest->flags & ETH_TEST_FL_OFFLINE) {
if (bp->pf.active_vfs) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -2595,13 +2612,22 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
- bnxt_hwrm_phy_loopback(bp, true);
+ bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
if (bnxt_run_loopback(bp)) {
buf[BNXT_PHYLPBK_TEST_IDX] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- bnxt_hwrm_phy_loopback(bp, false);
+ if (do_ext_lpbk) {
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ bnxt_hwrm_phy_loopback(bp, true, true);
+ msleep(1000);
+ if (bnxt_run_loopback(bp)) {
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+ bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
bnxt_open_nic(bp, false, true);
}
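
How the external-loopback leg is driven end to end, as read from the hunks above:

/* Userspace requests it through ethtool's ETH_TEST_FL_EXTERNAL_LB
 * flag; the driver honours the request only when firmware advertised
 * EXTERNAL_LPBK support in PORT_PHY_QCAPS (cached as
 * BNXT_TEST_FL_EXT_LPBK), and sets ETH_TEST_FL_EXTERNAL_LB_DONE so
 * ethtool can report whether the external leg actually ran.
 */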
@@ -2662,6 +2688,334 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return rc;
}
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ u16 seq = 0, len, segs_off;
+ void *resp = cmn_resp;
+ dma_addr_t dma_handle;
+ int rc, off = 0;
+ void *dma_buf;
+
+ dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
+ GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (info->dest_buf)
+ memcpy(info->dest_buf + off, dma_buf, len);
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
+ return rc;
+}
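
The chunked retrieval protocol implemented by the helper above, summarized:

/* Firmware DMAs at most info->dma_len bytes per request into the
 * bounce buffer. The driver resends with an incremented seq_no while
 * the response carries HWRM_DBG_CMN_FLAGS_MORE, appending each chunk
 * to info->dest_buf at the running offset. For HWRM_DBG_COREDUMP_LIST
 * the first reply also reports total_segments, which sizes dest_buf;
 * for HWRM_DBG_COREDUMP_RETRIEVE, dest_buf_size instead accumulates
 * the total returned length.
 */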
+
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct hwrm_dbg_coredump_list_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input req = {0};
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf)
+ info.dest_buf = buf + offset;
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
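+/* Fill in the "sEgM" header that precedes each segment in the dump
+ * file.  @seg_rec is NULL for the synthetic hwrm_ver_get segment.
+ */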
+static void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ strcpy(seg_hdr->signature, "sEgM");
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ } else {
+		/* The hwrm_ver_get response uses component id 2
+		 * and segment id 0.
+		 */
+ seg_hdr->component_id = cpu_to_le32(2);
+ seg_hdr->segment_id = 0;
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
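+/* Fill in the trailing "cOrE" record that closes the dump: start/end
+ * timestamps, kernel and OS identification, ASIC id and overall
+ * status.
+ */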
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ strcpy(record->signature, "cOrE");
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+	strscpy(record->system_name, utsname()->nodename,
+		sizeof(record->system_name));
+	record->year = cpu_to_le16(tm.tm_year + 1900);
+	record->month = cpu_to_le16(tm.tm_mon + 1);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ strcpy(record->commandline, "ethtool -w");
+ record->total_segments = cpu_to_le32(total_segs);
+
+ sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strcpy(record->os_name, utsname()->sysname);
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
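+/* Assemble the coredump: the hwrm_ver_get response as the first
+ * segment, then each firmware segment preceded by its header, and a
+ * closing coredump record.  Called with a NULL @buf to size the dump
+ * and again with the user buffer to fill it.
+ */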
+static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ struct coredump_segment_record *seg_record = NULL;
+ u32 offset = 0, seg_hdr_len, seg_record_len;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump_record coredump_rec;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+	/* The first segment is always the hwrm_ver_get response */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ if (rc) {
+			netdev_err(bp->dev,
+				   "Failed to initiate coredump for seg = %d\n",
+				   seg_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf,
+ offset + seg_hdr_len);
+ if (rc)
+			netdev_err(bp->dev,
+				   "Failed to retrieve coredump for seg = %d\n",
+				   seg_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf) {
+ bnxt_fill_coredump_record(bp, &coredump_rec, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ memcpy(buf + offset, &coredump_rec, sizeof(coredump_rec));
+ }
+ kfree(coredump.data);
+ *dump_len += sizeof(coredump_rec);
+
+ return rc;
+}
+
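+/* ethtool -w: report the firmware version as the dump version and the
+ * buffer length a subsequent get_dump_data call will need.
+ */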
+static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
+ bp->ver_resp.hwrm_fw_min_8b << 16 |
+ bp->ver_resp.hwrm_fw_bld_8b << 8 |
+ bp->ver_resp.hwrm_fw_rsvd_8b;
+
+ return bnxt_get_coredump(bp, NULL, &dump->len);
+}
+
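+/* ethtool -w data phase: zero the user buffer, then fill it with the
+ * assembled coredump.
+ */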
+static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
+ void *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return -EOPNOTSUPP;
+
+ memset(buf, 0, dump->len);
+
+ return bnxt_get_coredump(bp, buf, &dump->len);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
@@ -2702,6 +3056,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
strcpy(str, "Mac loopback test (offline)");
} else if (i == BNXT_PHYLPBK_TEST_IDX) {
strcpy(str, "Phy loopback test (offline)");
+ } else if (i == BNXT_EXTLPBK_TEST_IDX) {
+ strcpy(str, "Ext loopback test (offline)");
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
@@ -2763,4 +3119,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
.reset = bnxt_reset,
+ .get_dump_flag = bnxt_get_dump_flag,
+ .get_dump_data = bnxt_get_dump_data,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 836ef682f24c..b5b65b3f8534 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,6 +22,43 @@ struct bnxt_led_cfg {
u8 rsvd;
};
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
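+/* Segment list returned by HWRM_DBG_COREDUMP_LIST */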
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
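+/* Describes one chunked DMA transfer for bnxt_hwrm_dbg_dma_data():
+ * the offsets of the sequence number in the request and of the data
+ * length in the response, the per-chunk DMA buffer size, and the
+ * destination buffer the chunks are copied into.
+ */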
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+};
+
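+/* Fields shared by all HWRM debug requests/responses, used to update
+ * the sequence number and to test the MORE flag generically.
+ */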
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c75d7fa6dab6..971ace5d0d4a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -96,6 +96,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
#define HWRM_FUNC_VF_CFG 0xfUL
#define HWRM_RESERVED1 0x10UL
@@ -159,6 +160,7 @@ struct cmd_nums {
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
#define HWRM_RING_RESET 0x5eUL
#define HWRM_RING_GRP_ALLOC 0x60UL
#define HWRM_RING_GRP_FREE 0x61UL
@@ -191,6 +193,8 @@ struct cmd_nums {
#define HWRM_PORT_QSTATS_EXT 0xb4UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
#define HWRM_FW_SET_TIME 0xc8UL
#define HWRM_FW_GET_TIME 0xc9UL
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
@@ -269,6 +273,11 @@ struct cmd_nums {
#define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
#define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
#define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -284,6 +293,8 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_LIST 0xff17UL
#define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
#define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -318,6 +329,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
#define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
#define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -344,9 +356,9 @@ struct hwrm_err_output {
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.9.1.15"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 25
+#define HWRM_VERSION_STR "1.9.2.25"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -526,6 +538,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -564,6 +577,8 @@ struct hwrm_async_event_cmpl_link_status_change {
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
};
/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
@@ -817,23 +832,26 @@ struct hwrm_func_qcaps_output {
__le16 fid;
__le16 port_id;
__le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -947,58 +965,26 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
#define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
__le16 alloc_stat_ctx;
- u8 unused_2[7];
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg_input (size:384b/48B) */
-struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[2];
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_1;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_2;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- u8 unused_3[4];
-};
-
-/* hwrm_func_vlan_cfg_output (size:128b/16B) */
-struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
+ __le16 alloc_msix;
+ u8 unused_2[5];
u8 valid;
};
@@ -1010,7 +996,7 @@ struct hwrm_func_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le16 fid;
- u8 unused_0[2];
+ __le16 num_msix;
__le32 flags;
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
@@ -1050,6 +1036,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1109,13 +1097,19 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
#define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
};
@@ -1212,30 +1206,6 @@ struct hwrm_func_vf_resc_free_output {
u8 valid;
};
-/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
-struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[2];
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
-struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0[3];
- u8 valid;
-};
-
/* hwrm_func_drv_rgtr_input (size:896b/112B) */
struct hwrm_func_drv_rgtr_input {
__le16 req_type;
@@ -1286,7 +1256,9 @@ struct hwrm_func_drv_rgtr_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[7];
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
u8 valid;
};
@@ -1372,7 +1344,7 @@ struct hwrm_func_drv_qver_input {
u8 unused_0[2];
};
-/* hwrm_func_drv_qver_output (size:192b/24B) */
+/* hwrm_func_drv_qver_output (size:256b/32B) */
struct hwrm_func_drv_qver_output {
__le16 error_code;
__le16 req_type;
@@ -1394,12 +1366,13 @@ struct hwrm_func_drv_qver_output {
u8 ver_maj_8b;
u8 ver_min_8b;
u8 ver_upd_8b;
- u8 unused_0[2];
- u8 valid;
+ u8 unused_0[3];
__le16 ver_maj;
__le16 ver_min;
__le16 ver_upd;
__le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
};
/* hwrm_func_resource_qcaps_input (size:192b/24B) */
@@ -1493,6 +1466,410 @@ struct hwrm_func_vf_resource_cfg_output {
u8 valid;
};
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+};
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
@@ -1592,10 +1969,11 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
#define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1751,10 +2129,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
@@ -2014,6 +2393,131 @@ struct hwrm_port_mac_ptp_qcfg_output {
u8 valid;
};
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
__le16 req_type;
@@ -2039,6 +2543,83 @@ struct hwrm_port_qstats_output {
u8 valid;
};
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:2368b/296B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+};
+
/* hwrm_port_qstats_ext_input (size:320b/40B) */
struct hwrm_port_qstats_ext_input {
__le16 req_type;
@@ -2062,7 +2643,8 @@ struct hwrm_port_qstats_ext_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ __le16 total_active_cos_queues;
+ u8 unused_0;
u8 valid;
};
@@ -2153,9 +2735,10 @@ struct hwrm_port_phy_qcaps_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -2612,6 +3195,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id0;
u8 queue_id0_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2620,6 +3204,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id1;
u8 queue_id1_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2628,6 +3213,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id2;
u8 queue_id2_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2636,6 +3222,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id3;
u8 queue_id3_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2644,6 +3231,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id4;
u8 queue_id4_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2652,6 +3240,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id5;
u8 queue_id5_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2660,6 +3249,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id6;
u8 queue_id6_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -2668,6 +3258,7 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id7;
u8 queue_id7_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
@@ -3689,18 +4280,21 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
__le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
__le16 cos_rule;
__le16 lb_rule;
__le16 mru;
- u8 unused_0[4];
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -3740,6 +4334,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
u8 unused_1[7];
u8 valid;
};
@@ -3857,7 +4452,14 @@ struct hwrm_vnic_rss_cfg_input {
#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- u8 unused_0[4];
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
@@ -3950,7 +4552,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
u8 valid;
};
-/* hwrm_ring_alloc_input (size:640b/80B) */
+/* hwrm_ring_alloc_input (size:704b/88B) */
struct hwrm_ring_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3961,12 +4563,17 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
#define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
u8 unused_0[3];
__le64 page_tbl_addr;
__le32 fbo;
@@ -3977,8 +4584,9 @@ struct hwrm_ring_alloc_input {
__le16 logical_id;
__le16 cmpl_ring_id;
__le16 queue_id;
- u8 unused_2[2];
- __le32 reserved1;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
__le16 ring_arb_cfg;
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
@@ -4016,6 +4624,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
#define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
u8 unused_4[3];
+ __le64 cq_handle;
};
/* hwrm_ring_alloc_output (size:128b/16B) */
@@ -4042,7 +4651,9 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_TX 0x1UL
#define RING_FREE_REQ_RING_TYPE_RX 0x2UL
#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
u8 unused_0;
__le16 ring_id;
u8 unused_1[4];
@@ -4058,6 +4669,52 @@ struct hwrm_ring_free_output {
u8 valid;
};
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
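
Each *_min/*_max pair in this response bounds one coalescing knob of hwrm_ring_cmpl_ring_cfg_aggint_params below. A hedged sketch of clamping a requested value into the advertised range (clamp_t() is from linux/kernel.h; issuing the query itself is elided):

/* Illustrative only: bound a requested interrupt-latency timer by the
 * limits the firmware reported in hwrm_ring_aggint_qcaps_output.
 */
static u16 bnxt_clamp_int_lat_tmr_max(const struct hwrm_ring_aggint_qcaps_output *resp,
				      u16 requested)
{
	return clamp_t(u16, requested,
		       le16_to_cpu(resp->int_lat_tmr_max_min),
		       le16_to_cpu(resp->int_lat_tmr_max_max));
}
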
/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 req_type;
@@ -4100,6 +4757,7 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 flags;
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
__le16 num_cmpl_dma_aggr;
__le16 num_cmpl_dma_aggr_during_int;
__le16 cmpl_aggr_dma_tmr;
@@ -4107,7 +4765,14 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
__le16 int_lat_tmr_min;
__le16 int_lat_tmr_max;
__le16 num_cmpl_aggr_int;
- u8 unused_0[6];
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
};
/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
@@ -4120,34 +4785,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
u8 valid;
};
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
__le16 req_type;
@@ -5032,7 +5669,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0[7];
};
@@ -5059,7 +5697,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5087,7 +5726,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -5259,140 +5899,6 @@ struct hwrm_pcie_qstats_output {
u8 valid;
};
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* rx_port_stats_ext (size:320b/40B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
-};
-
/* pcie_ctx_hw_stats (size:768b/96B) */
struct pcie_ctx_hw_stats {
__le64 pcie_pl_signal_integrity;
@@ -5884,6 +6390,114 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 unused_0[7];
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 unused_0[2];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
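
The three message pairs above imply a list → initiate → retrieve flow, with retrieve repeated while the MORE flag is set. A hedged sketch of the chunked retrieve loop (the send helper is hypothetical; the real message plumbing lives elsewhere in the driver):

/* Sketch: pull one coredump segment chunk by chunk. Each iteration DMAs
 * up to host_buf_len bytes to host_dest_addr; seq_no advances per chunk.
 */
static int pull_segment(struct hwrm_dbg_coredump_retrieve_input *req,
			struct hwrm_dbg_coredump_retrieve_output *resp,
			int (*send_msg)(void *msg))	/* hypothetical */
{
	u32 seq = 0;
	int rc;

	do {
		req->seq_no = cpu_to_le32(seq++);
		rc = send_msg(req);
		if (rc)
			return rc;
	} while (resp->flags & DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE);

	return 0;
}
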
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
@@ -6201,19 +6815,6 @@ struct hwrm_nvm_install_update_cmd_err {
u8 unused_0[7];
};
-struct hwrm_nvm_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 data_addr;
- __le16 data_len;
- __le16 option_num;
- __le16 dimensions;
- __le16 index_0;
-};
-
/* hwrm_nvm_get_variable_input (size:320b/40B) */
struct hwrm_nvm_get_variable_input {
__le16 req_type;
@@ -6282,12 +6883,14 @@ struct hwrm_nvm_set_variable_input {
__le16 index_2;
__le16 index_3;
u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
u8 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a64910892c25..6d583bcd2a81 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -447,7 +447,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
struct bnxt_pf_info *pf = &bp->pf;
- int i, rc = 0;
+ int i, rc = 0, min = 1;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
@@ -464,14 +464,19 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
- if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
- req.min_cmpl_rings = cpu_to_le16(1);
- req.min_tx_rings = cpu_to_le16(1);
- req.min_rx_rings = cpu_to_le16(1);
- req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
- req.min_vnics = cpu_to_le16(1);
- req.min_stat_ctx = cpu_to_le16(1);
- req.min_hw_ring_grps = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ min = 0;
+ req.min_rsscos_ctx = cpu_to_le16(min);
+ }
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
+ pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ req.min_cmpl_rings = cpu_to_le16(min);
+ req.min_tx_rings = cpu_to_le16(min);
+ req.min_rx_rings = cpu_to_le16(min);
+ req.min_l2_ctxs = cpu_to_le16(min);
+ req.min_vnics = cpu_to_le16(min);
+ req.min_stat_ctx = cpu_to_le16(min);
+ req.min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@@ -618,7 +623,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (BNXT_NEW_RM(bp))
return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
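
BNXT_NEW_RM() replaces the open-coded flag test here and in bnxt_ulp.c below; its definition lives in bnxt.h, which is changed elsewhere in this series and not shown. Presumably it is just a wrapper, along the lines of:

/* Assumed shape of the helper; the real definition may differ. */
#define BNXT_NEW_RM(bp)	((bp)->flags & BNXT_FLAG_NEW_RM)
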
@@ -956,9 +961,13 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
mac_ok = true;
- } else if (bp->hwrm_spec_code < 0x10202) {
- mac_ok = true;
} else {
+		/* There are two cases:
+		 * 1. If firmware spec < 0x10202, the VF MAC address is not
+		 *    forwarded to the PF, so it does not have to match.
+		 * 2. Allow the VF to modify its own MAC when the PF has not
+		 *    assigned a valid MAC address and firmware spec >= 0x10202.
+		 */
mac_ok = true;
}
if (mac_ok)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 840f6e505f73..c37b2842f972 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -141,7 +141,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (avail_msix > num_msix)
avail_msix = num_msix;
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
idx = bp->cp_nr_rings;
} else {
max_idx = min_t(int, bp->total_irqs, max_cp_rings);
@@ -162,7 +162,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
return -EAGAIN;
}
- if (bp->flags & BNXT_FLAG_NEW_RM) {
+ if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index e088dedc1747..9f4f3c1d5043 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1417,50 +1417,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct,
return 0;
}
-void cn23xx_dump_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
- CN23XX_SLI_CTL_STATUS,
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
-
- for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-
- dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
- oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
- CVM_CAST64(lio_pci_readq(
- oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
- oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index 91dce8b8ef7e..962bb62933db 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -682,33 +682,3 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
return 0;
}
-
-void cn23xx_dump_vf_iq_regs(struct octeon_device *oct)
-{
- u32 regval, q_no;
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_DOORBELL(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_DOORBELL(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_BASE_ADDR64(0),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0))));
-
- dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
- CN23XX_VF_SLI_IQ_SIZE(0),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0))));
-
- for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
- dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
- q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))));
- }
-
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
- CN23XX_CONFIG_PCIE_DEVCTL, regval);
-}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 2320f7829a6b..6f312e03432f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2474,16 +2474,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
return NULL;
}
+static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld)
+{
+ const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld];
+
+ if (!utxq_info)
+ return 0;
+
+ return DIV_ROUND_UP(utxq_info->ntxq, 4);
+}
+
+static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld,
+ bool ciq)
+{
+ const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld];
+
+ if (!urxq_info)
+ return 0;
+
+ return ciq ? DIV_ROUND_UP(urxq_info->nciq, 4) :
+ DIV_ROUND_UP(urxq_info->nrxq, 4);
+}
+
+static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, false);
+}
+
+static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
+{
+ return sge_qinfo_uld_rspq_entries(adap, uld, true);
+}
+
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
+ int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
+ int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
+ const struct sge_uld_txq_info *utxq_info;
+ const struct sge_uld_rxq_info *urxq_info;
struct adapter *adap = seq->private;
- int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
- int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- int i, r = (uintptr_t)v - 1;
- int ofld_idx = r - eth_entries;
- int ctrl_idx = ofld_idx - ofld_entries;
- int fq_idx = ctrl_idx - ctrl_entries;
+ int i, n, r = (uintptr_t)v - 1;
+ int eth_entries, ctrl_entries;
+ struct sge *s = &adap->sge;
+
+ eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
+ ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
+
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ }
if (r)
seq_putc(seq, '\n');
@@ -2505,9 +2553,10 @@ do { \
if (r < eth_entries) {
int base_qset = r * 4;
- const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
- const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
- int n = min(4, adap->sge.ethqsets - 4 * r);
+ const struct sge_eth_rxq *rx = &s->ethrxq[base_qset];
+ const struct sge_eth_txq *tx = &s->ethtxq[base_qset];
+
+ n = min(4, s->ethqsets - 4 * r);
S("QType:", "Ethernet");
S("Interface:",
@@ -2532,8 +2581,7 @@ do { \
R("RspQ CIDX:", rspq.cidx);
R("RspQ Gen:", rspq.gen);
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
R("FL ID:", fl.cntxt_id);
R("FL size:", fl.size - 8);
R("FL pend:", fl.pend_cred);
@@ -2558,9 +2606,196 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (ctrl_idx < ctrl_entries) {
- const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
- int n = min(4, adap->params.nports - 4 * ctrl_idx);
+ goto unlock;
+ }
+
+ r -= eth_entries;
+ if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_OFLD];
+ tx = &utxq_info->uldtxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "OFLD-TXQ");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_OFLD];
+ if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "RDMA-CPL");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) {
+ const struct sge_ofld_rxq *rx;
+ int ciq_idx = 0;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ ciq_idx = urxq_info->nrxq + (r * 4);
+ rx = &urxq_info->uldrxq[ciq_idx];
+ n = min(4, urxq_info->nciq - 4 * r);
+
+ S("QType:", "RDMA-CIQ");
+ S("Interface:",
+ rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+
+ goto unlock;
+ }
+
+ r -= uld_ciq_entries[CXGB4_ULD_RDMA];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSI");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSI];
+ if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "iSCSIT");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_ISCSIT];
+ if (r < uld_rxq_entries[CXGB4_ULD_TLS]) {
+ const struct sge_ofld_rxq *rx;
+
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, urxq_info->nrxq - 4 * r);
+
+ S("QType:", "TLS");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_rxq_entries[CXGB4_ULD_TLS];
+ if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) {
+ const struct sge_ofld_rxq *rx;
+ const struct sge_uld_txq *tx;
+
+ utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO];
+ urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO];
+ tx = &utxq_info->uldtxq[r * 4];
+ rx = &urxq_info->uldrxq[r * 4];
+ n = min(4, utxq_info->ntxq - 4 * r);
+
+ S("QType:", "Crypto");
+ T("TxQ ID:", q.cntxt_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ CIDX:", q.cidx);
+ T("TxQ PIDX:", q.pidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+
+ goto unlock;
+ }
+
+ r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ if (r < ctrl_entries) {
+ const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
+
+ n = min(4, adap->params.nports - 4 * r);
S("QType:", "Control");
T("TxQ ID:", q.cntxt_id);
@@ -2570,8 +2805,13 @@ do { \
T("TxQ PIDX:", q.pidx);
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- } else if (fq_idx == 0) {
- const struct sge_rspq *evtq = &adap->sge.fw_evtq;
+
+ goto unlock;
+ }
+
+ r -= ctrl_entries;
+ if (r < 1) {
+ const struct sge_rspq *evtq = &s->fw_evtq;
seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
@@ -2582,8 +2822,13 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr delay:",
qtimer_val(adap, evtq));
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
- adap->sge.counter_val[evtq->pktcnt_idx]);
+ s->counter_val[evtq->pktcnt_idx]);
+
+ goto unlock;
}
+
+unlock:
+ mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -2597,8 +2842,21 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
+ int tot_uld_entries = 0;
+ int i;
+
+ mutex_lock(&uld_mutex);
+ for (i = 0; i < CXGB4_TX_MAX; i++)
+ tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
+
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i);
+ tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i);
+ }
+ mutex_unlock(&uld_mutex);
+
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
+ tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
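
The debugfs file shows queues four per row, so every term is a ceiling division by 4. A worked example with illustrative counts:

/* Example (values illustrative): 8 Ethernet qsets, one ULD with 6 rxqs
 * and 2 ciqs, no ULD txqs configured, MAX_CTRL_QUEUES == 4:
 *
 *   DIV_ROUND_UP(8, 4) + DIV_ROUND_UP(6, 4) + DIV_ROUND_UP(2, 4)
 *     + DIV_ROUND_UP(4, 4) + 1 (FW event queue)
 *   = 2 + 2 + 1 + 1 + 1 = 7 seq_file entries
 */
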
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 40cf8dc9f163..0f7ce71205e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -554,10 +554,9 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
dev = q->adap->port[q->adap->chan_map[port]];
dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
- ? !!(pcmd->u.info.dcbxdis_pkd &
- FW_PORT_CMD_DCBXDIS_F)
- : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
- FW_PORT_CMD_DCBXDIS32_F));
+ ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
+ : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
+ & FW_PORT_CMD_DCBXDIS32_F));
state_input = (dcbxdis
? CXGB4_DCB_INPUT_FW_DISABLED
: CXGB4_DCB_INPUT_FW_ENABLED);
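
The added be32_to_cpu() is the substance of this hunk: u.info32.lstatus32_to_cbllen32 is a big-endian wire value, and masking it with a host-order flag constant only works by accident on big-endian hosts. A minimal illustration of the corrected test:

/* Sketch: convert the wire value to host order before masking. */
static bool fw_dcbx_disabled(__be32 lstatus32_to_cbllen32)
{
	return !!(be32_to_cpu(lstatus32_to_cbllen32) &
		  FW_PORT_CMD_DCBXDIS32_F);
}
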
@@ -3074,6 +3073,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
adapter->geneve_port = 0;
t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+ break;
default:
return;
}
@@ -3159,6 +3159,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+ break;
default:
return;
}
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab912937aff..ec0b545197e2 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
config CS89x0
tristate "CS89x0 support"
depends on ISA || EISA || ARM
+ depends on !PPC32
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 90c645b8538e..60641e202534 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev)
return 0;
}
+static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool running = netif_running(netdev);
+ int err = 0;
+
+ ASSERT_RTNL();
+ if (running) {
+ err = enic_stop(netdev);
+ if (err)
+ return err;
+ }
+
+ netdev->mtu = new_mtu;
+
+ if (running) {
+ err = enic_open(netdev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
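
The ASSERT_RTNL() documents the helper's locking contract: both callers hold RTNL, the stack when it invokes .ndo_change_mtu and the deferred work explicitly, as the reworked enic_change_mtu_work() below shows. A sketch of the calling convention (function name illustrative):

/* Not patch code: callers must hold RTNL so the stop/open cycle cannot
 * race with other reconfiguration of the device.
 */
static void example_set_mtu(struct net_device *netdev, int new_mtu)
{
	rtnl_lock();
	(void)_enic_change_mtu(netdev, new_mtu);	/* may restart the NIC */
	rtnl_unlock();
}
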
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct enic *enic = netdev_priv(netdev);
- int running = netif_running(netdev);
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (running)
- enic_stop(netdev);
-
- netdev->mtu = new_mtu;
-
if (netdev->mtu > enic->port_mtu)
netdev_warn(netdev,
- "interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ "interface MTU (%d) set higher than port MTU (%d)\n",
+ netdev->mtu, enic->port_mtu);
- if (running)
- enic_open(netdev);
-
- return 0;
+ return _enic_change_mtu(netdev, new_mtu);
}
static void enic_change_mtu_work(struct work_struct *work)
@@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work)
struct enic *enic = container_of(work, struct enic, change_mtu_work);
struct net_device *netdev = enic->netdev;
int new_mtu = vnic_dev_mtu(enic->vdev);
- int err;
- unsigned int i;
-
- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
rtnl_lock();
-
- /* Stop RQ */
- del_timer_sync(&enic->notify_timer);
-
- for (i = 0; i < enic->rq_count; i++)
- napi_disable(&enic->napi[i]);
-
- vnic_intr_mask(&enic->intr[0]);
- enic_synchronize_irqs(enic);
- err = vnic_rq_disable(&enic->rq[0]);
- if (err) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to disable RQ.\n");
- return;
- }
- vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
- vnic_cq_clean(&enic->cq[0]);
- vnic_intr_clean(&enic->intr[0]);
-
- /* Fill RQ with new_mtu-sized buffers */
- netdev->mtu = new_mtu;
- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
- /* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to alloc receive buffers.\n");
- return;
- }
-
- /* Start RQ */
- vnic_rq_enable(&enic->rq[0]);
- napi_enable(&enic->napi[0]);
- vnic_intr_unmask(&enic->intr[0]);
- enic_notify_timer_start(enic);
-
+ (void)_enic_change_mtu(netdev, new_mtu);
rtnl_unlock();
netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
enic->port_mtu = enic->config.mtu;
- (void)enic_change_mtu(netdev, enic->port_mtu);
err = enic_set_mac_addr(netdev, enic->mac_addr);
if (err) {
@@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 68 - 9000 */
netdev->min_mtu = ENIC_MIN_MTU;
netdev->max_mtu = ENIC_MAX_MTU;
+ netdev->mtu = enic->port_mtu;
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index e9db811df59c..901e44b0b795 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1071,7 +1071,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
unsigned int num_bars)
{
if (!vdev) {
- vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
if (!vdev)
return NULL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index f8aa326d1d58..a3e7b003ada1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -35,7 +35,7 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!rq->bufs[i])
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 090cc65658a3..eb75891974df 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -35,7 +35,7 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!wq->bufs[i])
return -ENOMEM;
}
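
All three enic conversions are the same fix: these buffers are allocated from probe and .ndo_open paths, which run in process context and may sleep, so GFP_KERNEL (which can block and reclaim) is appropriate. GFP_ATOMIC stays reserved for contexts that cannot sleep, e.g.:

/* Contrast, not from the patch: atomic context must not sleep, so it
 * pays for that with a less reliable, non-blocking allocation.
 */
static void *alloc_from_irq(size_t sz)
{
	return kzalloc(sz, GFP_ATOMIC);	/* e.g. in an interrupt handler */
}
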
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 7005949dc17b..d80fe03d3107 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.4.0.0"
+#define DRV_VER "12.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 05e4c0bb25f4..d0b9415d9ae7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1412,6 +1412,83 @@ drop:
return NETDEV_TX_OK;
}
+static void be_tx_timeout(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_tx_obj *txo;
+ struct sk_buff *skb;
+ struct tcphdr *tcphdr;
+ struct udphdr *udphdr;
+ u32 *entry;
+ int status;
+ int i, j;
+
+ for_all_tx_queues(adapter, txo, i) {
+ dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
+ i, txo->q.head, txo->q.tail,
+ atomic_read(&txo->q.used), txo->q.id);
+
+ entry = txo->q.dma_mem.va;
+ for (j = 0; j < TX_Q_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ entry = txo->cq.dma_mem.va;
+ dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n",
+ i, txo->cq.head, txo->cq.tail,
+ atomic_read(&txo->cq.used));
+ for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
+ if (entry[j] != 0 || entry[j + 1] != 0 ||
+ entry[j + 2] != 0 || entry[j + 3] != 0) {
+ dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
+ j, entry[j], entry[j + 1],
+ entry[j + 2], entry[j + 3]);
+ }
+ }
+
+ for (j = 0; j < TX_Q_LEN; j++) {
+ if (txo->sent_skb_list[j]) {
+ skb = txo->sent_skb_list[j];
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
+ tcphdr = tcp_hdr(skb);
+ dev_info(dev, "TCP source port %d\n",
+ ntohs(tcphdr->source));
+ dev_info(dev, "TCP dest port %d\n",
+ ntohs(tcphdr->dest));
+ dev_info(dev, "TCP sequence num %d\n",
+ ntohs(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %d\n",
+ ntohs(tcphdr->ack_seq));
+ } else if (ip_hdr(skb)->protocol ==
+ IPPROTO_UDP) {
+ udphdr = udp_hdr(skb);
+ dev_info(dev, "UDP source port %d\n",
+ ntohs(udphdr->source));
+ dev_info(dev, "UDP dest port %d\n",
+ ntohs(udphdr->dest));
+ }
+ dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
+ j, skb, skb->len, skb->protocol);
+ }
+ }
+ }
+
+ if (lancer_chip(adapter)) {
+ dev_info(dev, "Initiating reset due to tx timeout\n");
+ dev_info(dev, "Resetting adapter\n");
+ status = lancer_physdev_ctrl(adapter,
+ PHYSDEV_CONTROL_FW_RESET_MASK);
+ if (status)
+ dev_err(dev, "Reset failed .. Reboot server\n");
+ }
+}
+
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
@@ -3274,7 +3351,7 @@ void be_detect_error(struct be_adapter *adapter)
/* Do not log error messages if its a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(dev, "Firmware update in progress\n");
+ dev_info(dev, "Reset is in progress\n");
} else {
dev_err(dev, "Error detected in the card\n");
dev_err(dev, "ERR: sliport status 0x%x\n",
@@ -5218,6 +5295,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
+ .ndo_tx_timeout = be_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c729665107f5..76366c735831 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -48,6 +48,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
@@ -2955,7 +2956,7 @@ static void set_multicast_list(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct netdev_hw_addr *ha;
- unsigned int i, bit, data, crc, tmp;
+ unsigned int crc, tmp;
unsigned char hash;
unsigned int hash_high = 0, hash_low = 0;
@@ -2983,15 +2984,7 @@ static void set_multicast_list(struct net_device *ndev)
/* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
- crc = 0xffffffff;
-
- for (i = 0; i < ndev->addr_len; i++) {
- data = ha->addr[i];
- for (bit = 0; bit < 8; bit++, data >>= 1) {
- crc = (crc >> 1) ^
- (((crc ^ data) & 1) ? CRC32_POLY : 0);
- }
- }
+ crc = ether_crc_le(ndev->addr_len, ha->addr);
/* only upper 6 bits (FEC_HASH_BITS) are used
* which point to specific bit in the hash registers
@@ -3136,6 +3129,7 @@ static int fec_enet_init(struct net_device *ndev)
unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
sizeof(struct bufdesc);
unsigned dsize_log2 = __fls(dsize);
+ int ret;
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
@@ -3146,6 +3140,13 @@ static int fec_enet_init(struct net_device *ndev)
fep->tx_align = 0x3;
#endif
+ /* Check mask of the streaming and coherent API */
+ ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0) {
+ dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
+ return ret;
+ }
+
fec_enet_alloc_queue(ndev);
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
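
ether_crc_le() (declared via the new linux/crc32.h include) computes the same reflected CRC-32 (polynomial 0xEDB88320) as the removed open-coded loop, and the hash index then comes from the top FEC_HASH_BITS bits. A hedged stand-alone model for reference:

#include <stdint.h>

/* Reference model of ether_crc_le() over a 6-byte MAC address. */
static uint32_t ether_crc_le_model(const uint8_t mac[6])
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < 6; i++) {
		crc ^= mac[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

/* Upper 6 bits select one of 64 positions across hash_high/hash_low. */
static unsigned int fec_hash_index(const uint8_t mac[6])
{
	return ether_crc_le_model(mac) >> (32 - 6);
}
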
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 1fc27c97e3b2..99fe2c210d0f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
+#include <linux/crc32.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -176,21 +177,10 @@ static void set_multicast_start(struct net_device *dev)
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
struct fs_enet_private *fep = netdev_priv(dev);
- int temp, hash_index, i, j;
+ int temp, hash_index;
u32 crc, csrVal;
- u8 byte, msb;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- byte = mac[i];
- for (j = 0; j < 8; j++) {
- msb = crc >> 31;
- crc <<= 1;
- if (msb ^ (byte & 0x1))
- crc ^= FEC_CRC_POLY;
- byte >>= 1;
- }
- }
+
+ crc = ether_crc(6, mac);
temp = (crc & 0x3f) >> 1;
hash_index = ((temp & 0x01) << 4) |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index bd68379d2bea..e6aad30e7e69 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -70,8 +70,8 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
return container_of(q, struct ring_pair_cb, q);
}
-struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
- u32 port_id)
+static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
{
int vfnum_per_port;
int qnum_per_vf;
@@ -329,7 +329,7 @@ static int hns_ae_start(struct hnae_handle *handle)
return 0;
}
-void hns_ae_stop(struct hnae_handle *handle)
+static void hns_ae_stop(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
@@ -357,7 +357,7 @@ static void hns_ae_reset(struct hnae_handle *handle)
}
}
-void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
{
u32 flag;
@@ -577,8 +577,8 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
*rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}
-void hns_ae_update_stats(struct hnae_handle *handle,
- struct net_device_stats *net_stats)
+static void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
{
int port;
int idx;
@@ -660,7 +660,7 @@ void hns_ae_update_stats(struct hnae_handle *handle,
net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
-void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
int idx;
struct hns_mac_cb *mac_cb;
@@ -692,8 +692,8 @@ void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
-void hns_ae_get_strings(struct hnae_handle *handle,
- u32 stringset, u8 *data)
+static void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
{
int port;
int idx;
@@ -725,7 +725,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
-int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
u32 sset_count = 0;
struct hns_mac_cb *mac_cb;
@@ -771,7 +771,7 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
return ret;
}
-void hns_ae_update_led_status(struct hnae_handle *handle)
+static void hns_ae_update_led_status(struct hnae_handle *handle)
{
struct hns_mac_cb *mac_cb;
@@ -783,8 +783,8 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
hns_set_led_opt(mac_cb);
}
-int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
- enum hnae_led_state status)
+static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
{
struct hns_mac_cb *mac_cb;
@@ -795,7 +795,7 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
return hns_cpld_led_set_id(mac_cb, status);
}
-void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
u32 *p = data;
int i;
@@ -820,7 +820,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
}
-int hns_ae_get_regs_len(struct hnae_handle *handle)
+static int hns_ae_get_regs_len(struct hnae_handle *handle)
{
u32 total_num;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 74bd260ca02a..5488c6e89f21 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -339,7 +339,7 @@ static void hns_gmac_init(void *mac_drv)
GMAC_TX_WATER_LINE_SHIFT, 8);
}
-void hns_gmac_update_stats(void *mac_drv)
+static void hns_gmac_update_stats(void *mac_drv)
{
struct mac_hw_stats *hw_stats = NULL;
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 9dcc5765f11f..3545a5d0bc95 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -458,11 +458,6 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
{
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
- MAC_MAX_MTU : MAC_MAX_MTU_V2;
-
- if (mac_cb->mac_type == HNAE_PORT_DEBUG)
- max_frm = MAC_MAX_MTU_DBG;
if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
return -EINVAL;
@@ -931,8 +926,9 @@ static int hns_mac_get_mode(phy_interface_t phy_if)
}
}
-u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
- struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+static u8 __iomem *
+hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
{
u8 __iomem *base = dsaf_dev->io_base;
int mac_id = mac_cb->mac_id;
@@ -950,7 +946,8 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
* @mac_cb: mac control block
 * return 0 - success, negative - fail
*/
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
+static int
+hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
{
int ret;
u32 mac_mode_idx;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 0ce07f6eb1e6..ca50c2553a9c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -28,7 +28,7 @@
#include "hns_dsaf_rcb.h"
#include "hns_dsaf_misc.h"
-const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
@@ -42,7 +42,7 @@ static const struct acpi_device_id hns_dsaf_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
-int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
{
int ret, i;
u32 desc_num;
@@ -959,7 +959,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
spin_unlock_bh(&dsaf_dev->tcam_lock);
}
-void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
+static void
+hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
{
addr[0] = mac_key->high.bits.mac_0;
addr[1] = mac_key->high.bits.mac_1;
@@ -1682,7 +1683,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_mcast_cfg mac_data;
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- struct dsaf_drv_tbl_tcam_key tmp_mac_key;
struct dsaf_tbl_tcam_data tcam_data;
u8 mc_addr[ETH_ALEN];
int mskid;
@@ -1739,10 +1739,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
/* if exist, add in */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
&mac_data);
-
- tmp_mac_key.high.val =
- le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
}
/* config hardware entry */
@@ -1852,7 +1848,7 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_tcam_data tcam_data;
int mskid;
const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
- struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key;
+ struct dsaf_drv_tbl_tcam_key mask_key;
struct dsaf_tbl_tcam_data *pmask_key = NULL;
u8 mc_addr[ETH_ALEN];
@@ -1915,9 +1911,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* read entry */
hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
- tmp_mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high);
- tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low);
-
/*del the port*/
if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
mskid = mac_entry->port_num;
@@ -2084,8 +2077,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
 * @dsaf_dev: dsa fabric device
 * @mac_id: mac id
 * @rate_mode: port rate mode
*/
-void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
- enum dsaf_port_rate_mode rate_mode)
+static void
+hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
{
u32 port_work_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index acf29633ec79..16294cd3c954 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -340,7 +340,8 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* bit18-19 for com/dfx
 * @dereset: false - request reset, true - drop reset
*/
-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+static void
+hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
u32 reg_addr;
@@ -362,7 +363,7 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* bit18-19 for com/dfx
 * @dereset: false - request reset, true - drop reset
*/
-void
+static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
@@ -370,7 +371,7 @@ hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
msk, dereset);
}
-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
{
if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
@@ -384,7 +385,7 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
}
}
-void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
{
hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
HNS_ROCE_RESET_FUNC, 0, dereset);
@@ -568,7 +569,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
return phy_if;
}
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
u32 val = 0;
int ret;
@@ -586,7 +587,7 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
return 0;
}
-int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
union acpi_object *obj;
union acpi_object obj_args, argv4;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 93e71e27401b..d160d8c9e45b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -73,7 +73,7 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
* comm_index: common index
 * return 0 - success, negative - fail
*/
-int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
{
struct ppe_common_cb *ppe_common;
int ppe_num;
@@ -104,7 +104,8 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
return 0;
}
-void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+static void
+hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
{
dsaf_dev->ppe_common[comm_index] = NULL;
}
@@ -203,9 +204,9 @@ static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
- mdelay(100);
+ msleep(100);
if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
switch (dsaf_mode) {
@@ -337,7 +338,7 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
}
}
-void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
{
u32 i;
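
The mdelay()-to-msleep() conversions in this file (and the matching ones in
hns_dsaf_xgmac.c and jme.c below) replace a busy-wait with a real sleep. A
minimal sketch of the trade-off, assuming the reset pulse runs in process
context where sleeping is allowed:

    /* mdelay() spins the CPU for the whole interval and is safe in
     * atomic context; msleep() yields to the scheduler. For a 100 ms
     * reset pulse on an init path, sleeping is the right choice.
     */
    static void reset_pulse_sketch(struct dsaf_device *dsaf_dev)
    {
            dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0); /* assert reset */
            msleep(100);                              /* sleep, don't spin */
            dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1); /* deassert */
            msleep(100);
    }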
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index e2e28532e4dc..9d76e2e54f9d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -705,7 +705,7 @@ void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
}
}
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
+static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
switch (dsaf_dev->dsaf_mode) {
case DSAF_MODE_ENABLE_FIX:
@@ -741,7 +741,7 @@ int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 51e7e9f5af49..ba4316910dea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -215,10 +215,10 @@ static void hns_xgmac_init(void *mac_drv)
u32 port = drv->mac_id;
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
- mdelay(100);
+ msleep(100);
dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
- mdelay(100);
+ msleep(100);
hns_xgmac_lf_rf_control_init(drv);
hns_xgmac_exc_irq_en(drv, 0);
@@ -311,7 +311,7 @@ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
}
-void hns_xgmac_update_stats(void *mac_drv)
+static void hns_xgmac_update_stats(void *mac_drv)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 948b3e0d18f4..c2ac187ec8fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1300,7 +1300,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-void hns_nic_update_stats(struct net_device *netdev)
+static void hns_nic_update_stats(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1582,7 +1582,7 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
-void hns_nic_poll_controller(struct net_device *ndev)
+static void hns_nic_poll_controller(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -1935,7 +1935,7 @@ static int hns_nic_uc_unsync(struct net_device *netdev,
*
* return void
*/
-void hns_set_multicast_list(struct net_device *ndev)
+static void hns_set_multicast_list(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
@@ -1957,7 +1957,7 @@ void hns_set_multicast_list(struct net_device *ndev)
}
}
-void hns_nic_set_rx_mode(struct net_device *ndev)
+static void hns_nic_set_rx_mode(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 2e14a3ae1d8b..3957205abb81 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -658,8 +658,8 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* @dev: net device
* @param: ethtool parameter
*/
-void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+static void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
@@ -808,7 +808,8 @@ static int hns_set_coalesce(struct net_device *net_dev,
* @dev: net device
* @ch: channel info.
*/
-void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+static void
+hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
@@ -825,8 +826,8 @@ void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
* @stats: statistics info.
* @data: statistics data.
*/
-void hns_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+static void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
u64 *p = data;
struct hns_nic_priv *priv = netdev_priv(netdev);
@@ -883,7 +884,7 @@ void hns_get_ethtool_stats(struct net_device *netdev,
 * @stringset: string set ID.
* @data: objects data.
*/
-void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -973,7 +974,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
*
* Return string set count.
*/
-int hns_get_sset_count(struct net_device *netdev, int stringset)
+static int hns_get_sset_count(struct net_device *netdev, int stringset)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1007,7 +1008,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
*
* Return 0 on success, negative on failure.
*/
-int hns_phy_led_set(struct net_device *netdev, int value)
+static int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
struct phy_device *phy_dev = netdev->phydev;
@@ -1029,7 +1030,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
*
* Return 0 on success, negative on failure.
*/
-int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+static int
+hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
@@ -1103,8 +1105,8 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
* @cmd: ethtool cmd
 * @data: register data
*/
-void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
- void *data)
+static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 6c9e5d62455b..bd031af38a96 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -50,7 +50,8 @@ static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 165c3d5e1c4c..ac13cb2b168e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -147,7 +147,12 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
if (!is_valid_csq_clean_head(csq, head)) {
dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
- return 0;
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ dev_warn(&hdev->pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
}
clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
@@ -267,10 +272,11 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
- if (handle != num) {
+ if (handle < 0)
+ retval = handle;
+ else if (handle != num)
dev_warn(&hdev->pdev->dev,
"cleaned %d, need to clean %d\n", handle, num);
- }
spin_unlock_bh(&hw->cmq.csq.lock);
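
For reference, the clean count computed above in hclge_cmd_csq_clean() is the
standard wrap-around distance on a descriptor ring; a self-contained sketch:

    /* Descriptors between next_to_clean and the hardware head pointer,
     * with wrap-around. Example: head = 2, next_to_clean = 1020,
     * desc_num = 1024 -> (2 - 1020 + 1024) % 1024 = 6.
     */
    static int csq_clean_count(int head, int next_to_clean, int desc_num)
    {
            return (head - next_to_clean + desc_num) % desc_num;
    }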
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 5cd22f9bbdec..cd0a4f228470 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -358,6 +358,8 @@ struct hclge_pf_res_cmd {
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
__le16 msixcap_localid_ba_rocee;
+#define HCLGE_MSIX_OFT_ROCEE_S 0
+#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0)
#define HCLGE_PF_VEC_NUM_S 0
#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
__le16 pf_intr_vector_number;
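
The new HCLGE_MSIX_OFT_ROCEE_{S,M} pair follows the driver's shift/GENMASK
convention. A hedged sketch of the decode that hnae3_get_field() is expected
to perform (assuming the usual mask-then-shift form):

    #define get_field_sketch(origin, mask, shift) \
            (((origin) & (mask)) >> (shift))
    /* With M = GENMASK(15, 0) and S = 0 this extracts the full 16-bit
     * RoCE MSI-X offset from msixcap_localid_ba_rocee:
     *   off = get_field_sketch(le16_to_cpu(req->msixcap_localid_ba_rocee),
     *                          HCLGE_MSIX_OFT_ROCEE_M,
     *                          HCLGE_MSIX_OFT_ROCEE_S);
     */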
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a9b888f1544a..fc813b7f20e8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -932,6 +932,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -939,7 +942,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->num_msi = hdev->num_roce_msi +
+ hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
@@ -2037,7 +2041,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
- HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index dfa5c9456d22..1528fb3fa6be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -16,8 +16,6 @@
#define HCLGE_INVALID_VPORT 0xffff
-#define HCLGE_ROCE_VECTOR_OFFSET 96
-
#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
@@ -509,6 +507,7 @@ struct hclge_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 roce_base_msix_offset;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
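
With the fixed HCLGE_ROCE_VECTOR_OFFSET (96) gone, the PF vector layout is
driven entirely by what the firmware reports; roughly:

    /*
     * MSI-X layout as reported by firmware (illustrative):
     *
     *   [0 .. roce_base_msix_offset - 1]                 NIC vectors
     *   [roce_base_msix_offset ..
     *    roce_base_msix_offset + num_roce_msi - 1]       RoCE vectors
     *
     * hence num_msi = roce_base_msix_offset + num_roce_msi and
     * roce_base_vector = base_msi_vector + roce_base_msix_offset.
     */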
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 621c6cbacf76..19b32860309c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -82,6 +82,7 @@ struct hclgevf_cmq {
enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -134,6 +135,19 @@ struct hclgevf_query_version_cmd {
__le32 firmware_rsv[5];
};
+#define HCLGEVF_MSIX_OFT_ROCEE_S 0
+#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
+#define HCLGEVF_VEC_NUM_S 0
+#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S)
+struct hclgevf_query_res_cmd {
+ __le16 tqp_num;
+ __le16 reserved;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_ba_rocee;
+ __le16 vf_intr_vector_number;
+ __le16 rsv[7];
+};
+
#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index d1f16f0c1646..9c0091f2addf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1370,14 +1370,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
struct hnae3_handle *roce = &hdev->roce;
struct hnae3_handle *nic = &hdev->nic;
- roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+ roce->rinfo.num_vectors = hdev->num_roce_msix;
if (hdev->num_msi_left < roce->rinfo.num_vectors ||
hdev->num_msi_left == 0)
return -EINVAL;
- roce->rinfo.base_vector =
- hdev->vector_status[hdev->num_msi_used];
+ roce->rinfo.base_vector = hdev->roce_base_vector;
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
@@ -1520,10 +1519,15 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
if (hclgevf_dev_ongoing_reset(hdev))
return 0;
- hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+ vectors = pci_alloc_irq_vectors(pdev,
+ hdev->roce_base_msix_offset + 1,
+ hdev->num_msi,
+ PCI_IRQ_MSIX);
+ else
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
- PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
"failed(%d) to allocate MSI/MSI-X vectors\n",
@@ -1538,6 +1542,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -1733,6 +1738,45 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_disable_device(pdev);
}
+static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_query_res_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query vf resource failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclgevf_query_res_cmd *)desc.data;
+
+ if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGEVF_MSIX_OFT_ROCEE_M,
+ HCLGEVF_MSIX_OFT_ROCEE_S);
+ hdev->num_roce_msix =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ /* VF should have NIC vectors and Roce vectors, NIC vectors
+ * are queued before Roce vectors. The offset is fixed to 64.
+ */
+ hdev->num_msi = hdev->num_roce_msix +
+ hdev->roce_base_msix_offset;
+ } else {
+ hdev->num_msi =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+ }
+
+ return 0;
+}
+
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -1750,18 +1794,26 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ /* Get vf resource */
+ ret = hclgevf_query_vf_resource(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query vf status error, ret = %d.\n", ret);
+ goto err_query_vf;
+ }
+
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_irq_init;
+ goto err_query_vf;
}
hclgevf_state_init(hdev);
- ret = hclgevf_cmd_init(hdev);
- if (ret)
- goto err_cmd_init;
-
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
@@ -1817,11 +1869,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
err_config:
hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
- hclgevf_cmd_uninit(hdev);
-err_cmd_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_irq_init:
+err_query_vf:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
hclgevf_pci_uninit(hdev);
return ret;
}
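
The init reordering above exists because the VF must query its resources over
the command queue before it knows how many MSI-X vectors to request. A
condensed sketch of the resulting order and error unwind (names as in the
driver, other init steps omitted):

    static int init_order_sketch(struct hclgevf_dev *hdev)
    {
            int ret;

            ret = hclgevf_cmd_init(hdev);           /* 1: cmd queue up */
            if (ret)
                    return ret;

            ret = hclgevf_query_vf_resource(hdev);  /* 2: needs cmd queue */
            if (ret)
                    goto err_query_vf;

            ret = hclgevf_init_msi(hdev);           /* 3: sized by step 2 */
            if (ret)
                    goto err_query_vf;

            return 0;

    err_query_vf:
            hclgevf_cmd_uninit(hdev);               /* unwind in reverse */
            return ret;
    }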
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 0656e8e5c5f0..b23ba171473c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -12,7 +12,6 @@
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
-#define HCLGEVF_ROCEE_VECTOR_NUM 0
#define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff
@@ -150,6 +149,9 @@ struct hclgevf_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 num_roce_msix; /* Num of roce vectors for this VF */
+ u16 roce_base_msix_offset;
+ int roce_base_vector;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 5b122728dcb4..09e9da10b786 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev)
hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
nic_dev, link_status_event_handler);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9128858479c4..2353ec829c04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
txq->txq_stats.tx_busy++;
u64_stats_update_end(&txq->txq_stats.syncp);
err = NETDEV_TX_BUSY;
+ wqe_size = 0;
goto flush_skbs;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b13b42e5a1d9..a795c07d0df7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -225,19 +225,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
- /* Make sure the PHY is in a good state. Several people have reported
- * firmware leaving the PHY's page select register set to something
- * other than the default of zero, which causes the PHY ID read to
- * access something other than the intended register.
- */
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
- goto out;
- }
-
/* Set phy->phy_addr and phy->id. */
- igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e3a0c02721c9..25720d95d4ea 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6031,7 +6031,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
* We also need this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
- wmb();
+ dma_wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -8531,7 +8531,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
- wmb();
+ dma_wmb();
writel(i, rx_ring->tail);
}
}
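
Both igb hunks relax wmb() to dma_wmb(), which is cheaper on weakly ordered
architectures and sufficient here because only writes to coherent DMA memory
need ordering. A minimal sketch of the Tx case (field names as in igb's
advanced descriptor layout):

    tx_desc->read.cmd_type_len = cmd_type; /* fill the descriptor first */
    dma_wmb();                             /* order descriptor writes ... */
    first->next_to_watch = tx_desc;        /* ... before the publish store */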
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 144d5fe6b944..4fc906c6166b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bd1ba88ec1d5..e5a8461fe6a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1139
+#define IXGBE_REGS_LEN 1145
return IXGBE_REGS_LEN * sizeof(u32);
}
@@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* X540 specific DCB registers */
regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
+
+ /* Security config registers */
+ regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+ regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
@@ -1690,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
- struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_ctl;
-
- /* shut down the DMA engines now so they can be reinitialized later */
+	/* Shut down the DMA engines now so they can be reinitialized later.
+	 * Since the test rings and the normally used rings overlap on queue
+	 * 0, the standard disable Rx/Tx calls also take care of disabling
+	 * the test rings for us.
+	 */
/* first Rx */
- hw->mac.ops.disable_rx(hw);
- ixgbe_disable_rx_queue(adapter, rx_ring);
+ ixgbe_disable_rx(adapter);
/* now Tx */
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
- reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
-
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- reg_ctl &= ~IXGBE_DMATXCTL_TE;
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
ixgbe_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5a6600f7b382..447098005490 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4022,38 +4022,6 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- int wait_loop = IXGBE_MAX_RX_DESC_POLL;
- u32 rxdctl;
- u8 reg_idx = ring->reg_idx;
-
- if (ixgbe_removed(hw->hw_addr))
- return;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-
- /* write value back with RXDCTL.ENABLE bit cleared */
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
-
- if (hw->mac.type == ixgbe_mac_82598EB &&
- !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
- return;
-
- /* the hardware may take up to 100us to really disable the rx queue */
- do {
- udelay(10);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
-
- if (!wait_loop) {
- e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
- "the polling period\n", reg_idx);
- }
-}
-
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
@@ -4063,9 +4031,13 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
- /* disable queue to avoid issues while updating state */
+ /* disable queue to avoid use of these values while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- ixgbe_disable_rx_queue(adapter, ring);
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -5633,6 +5605,212 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
ixgbe_up_complete(adapter);
}
+static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
+{
+ u16 devctl2;
+
+ pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
+
+ switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ /* For now we cap the upper limit on delay to 2 seconds
+ * as we end up going up to 34 seconds of delay in worst
+ * case timeout value.
+ */
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ return 2000000ul; /* 2.0 s */
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ return 520000ul; /* 520 ms */
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ return 130000ul; /* 130 ms */
+ case IXGBE_PCIDEVCTRL2_16_32ms:
+ return 32000ul; /* 32 ms */
+ case IXGBE_PCIDEVCTRL2_1_2ms:
+ return 2000ul; /* 2 ms */
+ case IXGBE_PCIDEVCTRL2_50_100us:
+ return 100ul; /* 100 us */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def:
+ return 32000ul; /* 32 ms */
+ default:
+ break;
+ }
+
+	/* We shouldn't need to hit this path, but just in case default as
+	 * though completion timeout is not supported and use 32 ms.
+ */
+ return 32000ul;
+}
+
+void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 rxdctl;
+
+ /* disable receives */
+ hw->mac.ops.disable_rx(hw);
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+ }
+
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting, however as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * of that wait is that it totals up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval.
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ rxdctl = 0;
+
+ /* OR together the reading of all the active RXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ }
+
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv,
+ "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+}
+
+void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, wait_loop;
+ u32 txdctl;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
+ /* disable all enabled Tx queues */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* disable all enabled XDP Tx queues */
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* If the link is not up there shouldn't be much in the way of
+ * pending transactions. Those that are left will be flushed out
+ * when the reset logic goes through the flush sequence to clean out
+ * the pending Tx transactions.
+ */
+ if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ goto dma_engine_disable;
+
+ /* Determine our minimum delay interval. We will increase this value
+ * with each subsequent test. This way if the device returns quickly
+ * we should spend as little time as possible waiting, however as
+ * the time increases we will wait for larger periods of time.
+ *
+ * The trick here is that we increase the interval using the
+ * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
+ * of that wait is that it totals up to 100x whatever interval we
+ * choose. Since our minimum wait is 100us we can just divide the
+ * total timeout by 100 to get our minimum delay interval.
+ */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ txdctl = 0;
+
+ /* OR together the reading of all the active TXDCTL registers,
+ * and then test the result. We need the disable to complete
+ * before we start freeing the memory and invalidating the
+ * DMA mappings.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[i];
+ u8 reg_idx = ring->reg_idx;
+
+ txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ }
+
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+ goto dma_engine_disable;
+ }
+
+ e_err(drv,
+ "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
+
+dma_engine_disable:
+ /* Disable the Tx DMA engine on 82599 and later MAC */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
+ /* fall through */
+ default:
+ break;
+ }
+}
+
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5814,24 +5992,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
return; /* do nothing if already down */
- /* disable receives */
- hw->mac.ops.disable_rx(hw);
+ /* Shut off incoming Tx traffic */
+ netif_tx_stop_all_queues(netdev);
- /* disable all enabled rx queues */
- for (i = 0; i < adapter->num_rx_queues; i++)
- /* this call also flushes the previous write */
- ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
- usleep_range(10000, 20000);
+ /* Disable Rx */
+ ixgbe_disable_rx(adapter);
/* synchronize_sched() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
synchronize_sched();
- netif_tx_stop_all_queues(netdev);
-
- /* call carrier off first to avoid false dev_watchdog timeouts */
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
ixgbe_irq_disable(adapter);
@@ -5859,30 +6032,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/* disable transmits in the hardware now that interrupts are off */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- u8 reg_idx = adapter->tx_ring[i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
-
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
- }
-
- /* Disable the Tx DMA engine on 82599 and later MAC */
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
- (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
- ~IXGBE_DMATXCTL_TE));
- break;
- default:
- break;
- }
+ ixgbe_disable_tx(adapter);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
@@ -6469,6 +6619,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (adapter->xdp_prog) {
+ e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
+ return -EPERM;
+ }
+
/*
* For 82599EB we cannot allow legacy VFs to enable their receive
* paths when MTU greater than 1500 is configured. So display a
@@ -9407,6 +9562,11 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
features &= ~NETIF_F_LRO;
+ if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
+ e_dev_err("LRO is not supported with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+
return features;
}
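
A quick check of the 1x/3x/5x back-off arithmetic used by both
ixgbe_disable_rx() and ixgbe_disable_tx() above: wait_delay starts at
delay_interval (call it d) and grows by 2*d each pass, so over the
IXGBE_MAX_RX_DESC_POLL (10) iterations the total time slept is

    d * (1 + 3 + 5 + ... + 19) = d * 10^2 = 100 * d

which is why d = completion_timeout / 100 bounds the whole poll loop by the
PCIe completion timeout while still returning quickly when the queues disable
promptly.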
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 06ff185eb188..a5ab6f3403ae 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1911,10 +1911,10 @@ jme_wait_link(struct jme_adapter *jme)
{
u32 phylink, to = JME_WAIT_LINK_TIME;
- mdelay(1000);
+ msleep(1000);
phylink = jme_linkstat_from_phy(jme);
while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
- mdelay(10);
+ usleep_range(10000, 11000);
phylink = jme_linkstat_from_phy(jme);
}
}
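
The jme conversions follow the usual kernel guidance on picking a delay
primitive (Documentation/timers/timers-howto.rst, paraphrased; the boundaries
are rules of thumb, not hard limits):

    /* In non-atomic context:
     *   <= ~10 us         udelay()        busy-wait
     *   ~10 us - ~20 ms   usleep_range()  hrtimer-backed sleep
     *   >  ~20 ms         msleep()        jiffies-backed sleep
     * hence msleep(1000) for the 1 s settle time and
     * usleep_range(10000, 11000) for the 10 ms poll interval.
     */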
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0ad2f3f7da85..55c2a56c5dae 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -295,10 +295,10 @@
#define MVNETA_RSS_LU_TABLE_SIZE 1
/* Max number of Rx descriptors */
-#define MVNETA_MAX_RXD 128
+#define MVNETA_MAX_RXD 512
/* Max number of Tx descriptors */
-#define MVNETA_MAX_TXD 532
+#define MVNETA_MAX_TXD 1024
/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100
@@ -328,6 +328,8 @@
enum {
ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_STAT_SKB_ALLOC_ERR,
+ ETHTOOL_STAT_REFILL_ERR,
ETHTOOL_MAX_STATS,
};
@@ -375,6 +377,8 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3054, T_REG_32, "fc_sent", },
{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
+ { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
+ { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
};
struct mvneta_pcpu_stats {
@@ -479,7 +483,10 @@ struct mvneta_port {
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
-#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
+#define MVNETA_RXD_LAST_DESC BIT(26)
+#define MVNETA_RXD_FIRST_DESC BIT(27)
+#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
+ MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
#if defined(__LITTLE_ENDIAN)
@@ -589,9 +596,6 @@ struct mvneta_rx_queue {
/* num of rx descriptors in the rx descriptor ring */
int size;
- /* counter of times when mvneta_refill() failed */
- int missed;
-
u32 pkts_coal;
u32 time_coal;
@@ -609,6 +613,18 @@ struct mvneta_rx_queue {
/* Index of the next RX DMA descriptor to process */
int next_desc_to_proc;
+
+ /* Index of first RX DMA descriptor to refill */
+ int first_to_refill;
+ u32 refill_num;
+
+	/* pointer to incomplete skb buffer */
+ struct sk_buff *skb;
+ int left_size;
+
+ /* error counters */
+ u32 skb_alloc_err;
+ u32 refill_err;
};
static enum cpuhp_state online_hpstate;
@@ -621,6 +637,7 @@ static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;
+static int rx_header_size __read_mostly = 128;
/* HW BM need that each port be identify by a unique ID */
static int global_port_id;
@@ -1684,13 +1701,6 @@ static void mvneta_rx_error(struct mvneta_port *pp,
{
u32 status = rx_desc->status;
- if (!mvneta_rxq_desc_is_first_last(status)) {
- netdev_err(pp->dev,
- "bad rx status %08x (buffer oversize), size=%d\n",
- status, rx_desc->data_size);
- return;
- }
-
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -1715,7 +1725,8 @@ static void mvneta_rx_error(struct mvneta_port *pp,
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
struct sk_buff *skb)
{
- if ((status & MVNETA_RXD_L3_IP4) &&
+ if ((pp->dev->features & NETIF_F_RXCSUM) &&
+ (status & MVNETA_RXD_L3_IP4) &&
(status & MVNETA_RXD_L4_CSUM_OK)) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1790,47 +1801,30 @@ static void mvneta_txq_done(struct mvneta_port *pp,
}
}
-void *mvneta_frag_alloc(unsigned int frag_size)
-{
- if (likely(frag_size <= PAGE_SIZE))
- return netdev_alloc_frag(frag_size);
- else
- return kmalloc(frag_size, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-
-void mvneta_frag_free(unsigned int frag_size, void *data)
-{
- if (likely(frag_size <= PAGE_SIZE))
- skb_free_frag(data);
- else
- kfree(data);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_free);
-
/* Refill processing for SW buffer management */
+/* Allocate page per descriptor */
static int mvneta_rx_refill(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
- struct mvneta_rx_queue *rxq)
-
+ struct mvneta_rx_queue *rxq,
+ gfp_t gfp_mask)
{
dma_addr_t phys_addr;
- void *data;
+ struct page *page;
- data = mvneta_frag_alloc(pp->frag_size);
- if (!data)
+ page = __dev_alloc_page(gfp_mask);
+ if (!page)
return -ENOMEM;
- phys_addr = dma_map_single(pp->dev->dev.parent, data,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
+ /* map page for use */
+ phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(page);
return -ENOMEM;
}
phys_addr += pp->rx_offset_correction;
- mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
+ mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
return 0;
}
@@ -1893,115 +1887,192 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
void *data = rxq->buf_virt_addr[i];
+ if (!data || !(rx_desc->buf_phys_addr))
+ continue;
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- mvneta_frag_free(pp->frag_size, data);
+ __free_page(data);
}
}
+static inline
+int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
+{
+ struct mvneta_rx_desc *rx_desc;
+ int curr_desc = rxq->first_to_refill;
+ int i;
+
+ for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
+ rx_desc = rxq->descs + curr_desc;
+ if (!(rx_desc->buf_phys_addr)) {
+ if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ pr_err("Can't refill queue %d. Done %d from %d\n",
+ rxq->id, i, rxq->refill_num);
+ rxq->refill_err++;
+ break;
+ }
+ }
+ curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
+ }
+ rxq->refill_num -= i;
+ rxq->first_to_refill = curr_desc;
+
+ return i;
+}
+
/* Main rx processing when using software buffer management */
-static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_swbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
- int rx_done;
+ int rx_todo, rx_proc;
+ int refill = 0;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
/* Get number of received packets */
- rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
-
- if (rx_todo > rx_done)
- rx_todo = rx_done;
-
- rx_done = 0;
+ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ rx_proc = 0;
/* Fairness NAPI loop */
- while (rx_done < rx_todo) {
+ while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- struct sk_buff *skb;
unsigned char *data;
+ struct page *page;
dma_addr_t phys_addr;
- u32 rx_status, frag_size;
- int rx_bytes, err, index;
+ u32 rx_status, index;
+ int rx_bytes, skb_size, copy_size;
+ int frag_num, frag_size, frag_offset;
- rx_done++;
- rx_status = rx_desc->status;
- rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
index = rx_desc - rxq->descs;
- data = rxq->buf_virt_addr[index];
- phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
-
- if (!mvneta_rxq_desc_is_first_last(rx_status) ||
- (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
- mvneta_rx_error(pp, rx_desc);
-err_drop_frame:
- dev->stats.rx_errors++;
- /* leave the descriptor untouched */
- continue;
- }
-
- if (rx_bytes <= rx_copybreak) {
- /* better copy a small frame and not unmap the DMA region */
- skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
- if (unlikely(!skb))
- goto err_drop_frame;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr,
- MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes,
- DMA_FROM_DEVICE);
- skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
- rx_bytes);
+ page = (struct page *)rxq->buf_virt_addr[index];
+ data = page_address(page);
+ /* Prefetch header */
+ prefetch(data);
- skb->protocol = eth_type_trans(skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
-
- rcvd_pkts++;
- rcvd_bytes += rx_bytes;
+ phys_addr = rx_desc->buf_phys_addr;
+ rx_status = rx_desc->status;
+ rx_proc++;
+ rxq->refill_num++;
+
+ if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ /* Check errors only for FIRST descriptor */
+ if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
+ mvneta_rx_error(pp, rx_desc);
+ dev->stats.rx_errors++;
+ /* leave the descriptor untouched */
+ continue;
+ }
+ rx_bytes = rx_desc->data_size -
+ (ETH_FCS_LEN + MVNETA_MH_SIZE);
+
+ /* Allocate small skb for each new packet */
+ skb_size = max(rx_copybreak, rx_header_size);
+ rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
+ if (unlikely(!rxq->skb)) {
+ netdev_err(dev,
+ "Can't allocate skb on queue %d\n",
+ rxq->id);
+ dev->stats.rx_dropped++;
+ rxq->skb_alloc_err++;
+ continue;
+ }
+ copy_size = min(skb_size, rx_bytes);
+
+ /* Copy data from buffer to SKB, skip Marvell header */
+ memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
+ copy_size);
+ skb_put(rxq->skb, copy_size);
+ rxq->left_size = rx_bytes - copy_size;
+
+ mvneta_rx_csum(pp, rx_status, rxq->skb);
+ if (rxq->left_size == 0) {
+ int size = copy_size + MVNETA_MH_SIZE;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = 0;
+ frag_offset = copy_size + MVNETA_MH_SIZE;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rxq->left_size -= frag_size;
+ }
+ } else {
+ /* Middle or Last descriptor */
+ if (unlikely(!rxq->skb)) {
+ pr_debug("no skb for rx_status 0x%x\n",
+ rx_status);
+ continue;
+ }
+ if (!rxq->left_size) {
+ /* last descriptor has only FCS */
+ /* and can be discarded */
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ phys_addr, 0,
+ ETH_FCS_LEN,
+ DMA_FROM_DEVICE);
+ /* leave the descriptor and buffer untouched */
+ } else {
+ /* refill descriptor with new buffer later */
+ rx_desc->buf_phys_addr = 0;
+
+ frag_num = skb_shinfo(rxq->skb)->nr_frags;
+ frag_offset = 0;
+ frag_size = min(rxq->left_size,
+ (int)(PAGE_SIZE - frag_offset));
+ skb_add_rx_frag(rxq->skb, frag_num, page,
+ frag_offset, frag_size,
+ PAGE_SIZE);
+
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ rxq->left_size -= frag_size;
+ }
+ } /* Middle or Last descriptor */
- /* leave the descriptor and buffer untouched */
+ if (!(rx_status & MVNETA_RXD_LAST_DESC))
+ /* no last descriptor this time */
continue;
- }
- /* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc, rxq);
- if (err) {
- netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
- goto err_drop_frame;
+ if (rxq->left_size) {
+ pr_err("get last desc, but left_size (%d) != 0\n",
+ rxq->left_size);
+ dev_kfree_skb_any(rxq->skb);
+ rxq->left_size = 0;
+ rxq->skb = NULL;
+ continue;
}
-
- frag_size = pp->frag_size;
-
- skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
-
- /* After refill old buffer has to be unmapped regardless
- * the skb is successfully built or not.
- */
- dma_unmap_single(dev->dev.parent, phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size),
- DMA_FROM_DEVICE);
-
- if (!skb)
- goto err_drop_frame;
-
rcvd_pkts++;
- rcvd_bytes += rx_bytes;
+ rcvd_bytes += rxq->skb->len;
/* Linux processing */
- skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
- skb_put(skb, rx_bytes);
-
- skb->protocol = eth_type_trans(skb, dev);
+ rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- mvneta_rx_csum(pp, rx_status, skb);
+ if (dev->features & NETIF_F_GRO)
+ napi_gro_receive(napi, rxq->skb);
+ else
+ netif_receive_skb(rxq->skb);
- napi_gro_receive(&port->napi, skb);
+		/* clear incomplete skb pointer in queue */
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
if (rcvd_pkts) {
@@ -2013,17 +2084,20 @@ err_drop_frame:
u64_stats_update_end(&stats->syncp);
}
+ /* return some buffers to hardware queue, one at a time is too slow */
+ refill = mvneta_rx_refill_queue(pp, rxq);
+
/* Update rxq management counters */
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+ mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
- return rx_done;
+ return rcvd_pkts;
}
/* Main rx processing when using hardware buffer management */
-static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_hwbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -2085,7 +2159,7 @@ err_drop_frame:
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2102,7 +2176,7 @@ err_drop_frame:
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->missed++;
+ rxq->refill_err++;
goto err_drop_frame_ret_pool;
}
@@ -2129,7 +2203,7 @@ err_drop_frame:
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
}
if (rcvd_pkts) {
@@ -2722,9 +2796,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
- rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_hwbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
else
- rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_swbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
}
if (rx_done < budget) {
@@ -2761,9 +2837,11 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
- if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
- netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
- __func__, rxq->id, i, num);
+ if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
+ GFP_KERNEL) != 0) {
+ netdev_err(pp->dev,
+ "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
break;
}
}
@@ -2821,21 +2899,23 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
- /* Set Offset */
- mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
-
/* Set coalescing pkts and time */
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
if (!pp->bm_priv) {
- /* Fill RXQ with buffers from RX pool */
- mvneta_rxq_buf_size_set(pp, rxq,
- MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, 0);
+ mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
} else {
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq,
+ NET_SKB_PAD - pp->rx_offset_correction);
+
mvneta_rxq_bm_enable(pp, rxq);
+ /* Fill RXQ with buffers from RX pool */
mvneta_rxq_long_pool_set(pp, rxq);
mvneta_rxq_short_pool_set(pp, rxq);
mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
@@ -2864,6 +2944,9 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
{
mvneta_rxq_drop_pkts(pp, rxq);
+ if (rxq->skb)
+ dev_kfree_skb_any(rxq->skb);
+
if (rxq->descs)
dma_free_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2874,6 +2957,10 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
rxq->last_desc = 0;
rxq->next_desc_to_proc = 0;
rxq->descs_phys = 0;
+ rxq->first_to_refill = 0;
+ rxq->refill_num = 0;
+ rxq->skb = NULL;
+ rxq->left_size = 0;
}
static int mvneta_txq_sw_init(struct mvneta_port *pp,
@@ -3177,8 +3264,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mvneta_bm_update_mtu(pp, mtu);
pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
@@ -3194,7 +3279,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
- mvneta_port_up(pp);
netdev_update_features(dev);
@@ -3666,8 +3750,7 @@ static int mvneta_open(struct net_device *dev)
int ret;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
- pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ pp->frag_size = PAGE_SIZE;
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -3962,6 +4045,12 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
break;
+ case ETHTOOL_STAT_SKB_ALLOC_ERR:
+ val = pp->rxqs[0].skb_alloc_err;
+ break;
+ case ETHTOOL_STAT_REFILL_ERR:
+ val = pp->rxqs[0].refill_err;
+ break;
}
break;
}
@@ -4362,14 +4451,6 @@ static int mvneta_probe(struct platform_device *pdev)
pp->dn = dn;
pp->rxq_def = rxq_def;
-
- /* Set RX packet offset correction for platforms, whose
- * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
- * platforms and 0B for 32-bit ones.
- */
- pp->rx_offset_correction =
- max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
-
pp->indir[0] = rxq_def;
/* Get special SoC configurations */
@@ -4457,16 +4538,28 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
+ pp->rx_offset_correction = 0; /* not relevant for SW BM */
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
- if (bm_node && bm_node->data) {
- pp->bm_priv = bm_node->data;
- err = mvneta_bm_port_init(pdev, pp);
- if (err < 0) {
- dev_info(&pdev->dev, "use SW buffer management\n");
- pp->bm_priv = NULL;
+ if (bm_node) {
+ pp->bm_priv = mvneta_bm_get(bm_node);
+ if (pp->bm_priv) {
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev,
+ "use SW buffer management\n");
+ mvneta_bm_put(pp->bm_priv);
+ pp->bm_priv = NULL;
+ }
}
+		/* Set RX packet offset correction for platforms whose
+		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
+		 * platforms and 0B for 32-bit ones.
+ */
+ pp->rx_offset_correction = max(0,
+ NET_SKB_PAD -
+ MVNETA_RX_PKT_OFFSET_CORRECTION);
}
of_node_put(bm_node);
@@ -4526,6 +4619,7 @@ err_netdev:
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
err_free_stats:
free_percpu(pp->stats);
@@ -4563,6 +4657,7 @@ static int mvneta_remove(struct platform_device *pdev)
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
+ mvneta_bm_put(pp->bm_priv);
}
return 0;
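
Taken together, the mvneta changes switch software buffer management from one
buffer fragment per packet to a page per descriptor with multi-descriptor
(scatter) receive. A condensed sketch of the per-descriptor handling in the
new mvneta_rx_swbm() (names as in the driver, error paths omitted):

    /* FIRST desc:  allocate a small skb of max(rx_copybreak,
     *              rx_header_size) bytes, copy the header into it, and
     *              attach the remainder of the page as frag 0;
     * MIDDLE desc: append the whole page as the next frag;
     * LAST desc:   hand the completed rxq->skb to the stack, then
     *              batch-refill up to 64 descriptors at a time via
     *              mvneta_rx_refill_queue() instead of one per packet.
     */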
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 466939f8f0cf..de468e1bdba9 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -392,6 +393,20 @@ static void mvneta_bm_put_sram(struct mvneta_bm *priv)
MVNETA_BM_BPPI_SIZE);
}
+struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+
+ return pdev ? platform_get_drvdata(pdev) : NULL;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_get);
+
+void mvneta_bm_put(struct mvneta_bm *priv)
+{
+ platform_device_put(priv->pdev);
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_put);
+
static int mvneta_bm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
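
mvneta_bm_get()/mvneta_bm_put() wrap the platform-device reference taken by
of_find_device_by_node(), so every successful get must be balanced by a put.
Usage as in mvneta_probe(), sketched:

    struct mvneta_bm *bm = mvneta_bm_get(bm_node);  /* takes a device ref */

    if (bm && mvneta_bm_port_init(pdev, pp) < 0) {
            mvneta_bm_put(bm);      /* drop the reference on fallback */
            bm = NULL;              /* fall back to SW buffer management */
    }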
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index a32de432800c..c8425d35c049 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -130,10 +130,10 @@ struct mvneta_bm_pool {
};
/* Declarations and definitions */
-void *mvneta_frag_alloc(unsigned int frag_size);
-void mvneta_frag_free(unsigned int frag_size, void *data);
-
#if IS_ENABLED(CONFIG_MVNETA_BM)
+struct mvneta_bm *mvneta_bm_get(struct device_node *node);
+void mvneta_bm_put(struct mvneta_bm *priv);
+
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool, u8 port_map);
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
@@ -178,5 +178,7 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool)
{ return 0; }
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
#endif /* CONFIG_MVNETA_BM */
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c30aea2ab767..6e6abdc399de 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -605,10 +605,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &eth->phy_scratch_ring,
- GFP_ATOMIC | __GFP_ZERO);
+ eth->scratch_ring = dma_zalloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &eth->phy_scratch_ring,
+ GFP_ATOMIC);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -623,7 +623,6 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
return -ENOMEM;
- memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
@@ -1318,10 +1317,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
}
- ring->dma = dma_alloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys,
- GFP_ATOMIC | __GFP_ZERO);
+ ring->dma = dma_zalloc_coherent(eth->dev,
+ rx_dma_size * sizeof(*ring->dma),
+ &ring->phys, GFP_ATOMIC);
if (!ring->dma)
return -ENOMEM;
@@ -2460,42 +2458,6 @@ free_netdev:
return err;
}
-static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
-{
- u32 val[2], id[4];
-
- regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
- regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);
-
- id[3] = ((val[0] >> 16) & 0xff) - '0';
- id[2] = ((val[0] >> 24) & 0xff) - '0';
- id[1] = (val[1] & 0xff) - '0';
- id[0] = ((val[1] >> 8) & 0xff) - '0';
-
- *chip_id = (id[3] * 1000) + (id[2] * 100) +
- (id[1] * 10) + id[0];
-
- if (!(*chip_id)) {
- dev_err(eth->dev, "failed to get chip id\n");
- return -ENODEV;
- }
-
- dev_info(eth->dev, "chip id = %d\n", *chip_id);
-
- return 0;
-}
-
-static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
-{
- switch (eth->chip_id) {
- case MT7622_ETH:
- case MT7623_ETH:
- return true;
- }
-
- return false;
-}
-
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2574,11 +2536,7 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
return err;
- err = mtk_get_chip_id(eth, &eth->chip_id);
- if (err)
- return err;
-
- eth->hwlro = mtk_is_hwlro_supported(eth);
+ eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
for_each_child_of_node(pdev->dev.of_node, mac_np) {
if (!of_device_is_compatible(mac_np,
@@ -2667,19 +2625,19 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7622_data = {
- .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
+ .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
- .caps = MTK_GMAC1_TRGMII,
+ .caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
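With hardware LRO expressed as a per-SoC capability bit instead of a
runtime chip-id probe, support is decided entirely by the matched
mtk_soc_data. MTK_HAS_CAPS() tests that every requested bit is present,
so composite flags match only when all of their component bits are set;
a small illustration:

	/* caps for mt2701/mt7623 after this patch */
	unsigned long caps = MTK_GMAC1_TRGMII | MTK_HWLRO;

	MTK_HAS_CAPS(caps, MTK_HWLRO);			/* true */
	MTK_HAS_CAPS(caps, MTK_HWLRO | MTK_GMAC1_ESW);	/* false: ESW bit missing */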
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 672b8c353c47..46819297fc3e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -566,6 +566,7 @@ struct mtk_rx_ring {
#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII)
#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \
MTK_GMAC2_SGMII)
+#define MTK_HWLRO BIT(12)
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
/* struct mtk_eth_data - This is the structure holding all differences
@@ -635,7 +636,6 @@ struct mtk_eth {
struct regmap *ethsys;
struct regmap *sgmiisys;
struct regmap *pctl;
- u32 chip_id;
bool hwlro;
refcount_t dma_refcnt;
struct mtk_tx_ring tx_ring;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2d979a652b7b..d2d59444f562 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -159,9 +159,10 @@ static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
-int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
+ "(0-7) (default: 0)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
@@ -4410,7 +4411,7 @@ static int __init mlx4_verify_params(void)
if (use_prio != 0)
pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
- if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
+ if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
log_mtts_per_seg);
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6e016092a6f1..ebcd2778eeb3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -84,7 +84,6 @@ enum {
MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index bae8b22edbb7..ba361c5fbda3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -105,7 +105,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
request->num_mtt =
roundup_pow_of_two(max_t(unsigned, request->num_mtt,
min(1UL << (31 - log_mtts_per_seg),
- si.totalram >> (log_mtts_per_seg - 1))));
+ (si.totalram << 1) >> log_mtts_per_seg)));
+
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
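The reworked bound is what makes log_mtts_per_seg == 0 legal: the old
expression shifted right by (log_mtts_per_seg - 1), which is invalid for
0, while the new one computes the same value for every log in 0..7. A
quick equivalence check:

	/* Both forms bound num_mtt by totalram * 2 / (1 << log):
	 *   old: si.totalram >> (log - 1)    -- invalid shift for log == 0
	 *   new: (si.totalram << 1) >> log   -- defined for log = 0..7
	 * e.g. log = 3, totalram = 1 << 20:
	 *   old: (1 << 20) >> 2 == 1 << 18
	 *   new: (1 << 21) >> 3 == 1 << 18   -- identical
	 * and for log == 0 the new form simply yields totalram * 2.
	 */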
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 7b1b5ac986d0..31bd56727022 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ int local_qpn = vhcr->in_modifier & 0xffffff;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index fa7fcca5dc78..f20fda1ced4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
- en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
+ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
+ en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o lib/vxlan.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 323ffe8bf7e4..456f30007ad6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
int i;
buf->size = size;
- buf->npages = 1 << get_order(size);
+ buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
buf->page_shift = PAGE_SHIFT;
buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
GFP_KERNEL);
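Switching npages from 1 << get_order(size) to DIV_ROUND_UP(size, PAGE_SIZE)
matters because a fragmented buffer is built from individual pages rather
than one contiguous power-of-two allocation, so the frags array should
track the exact page count:

	/* Example with 4 KiB pages and size = 12 KiB:
	 *   1 << get_order(size)          == 4  (next power of two)
	 *   DIV_ROUND_UP(size, PAGE_SIZE) == 3  (exact page count)
	 * The old formula over-counted for any non-power-of-two size.
	 */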
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e1b237ccdf56..c7ed3d20fd54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -147,10 +147,6 @@ struct page_pool;
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
- ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-
#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -348,6 +344,7 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_REDIRECT,
};
struct mlx5e_sq_wqe_info {
@@ -368,16 +365,14 @@ struct mlx5e_txqsq {
struct mlx5e_cq cq;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
- } db;
-
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
struct mlx5e_sq_stats *stats;
+ struct {
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } db;
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
@@ -399,30 +394,43 @@ struct mlx5e_txqsq {
} recover;
} ____cacheline_aligned_in_smp;
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
+struct mlx5e_xdp_info {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ struct mlx5e_dma_info di;
+};
+
struct mlx5e_xdpsq {
/* data path */
- /* dirtied @rx completion */
+ /* dirtied @completion */
u16 cc;
- u16 pc;
+ bool redirect_flush;
- struct mlx5e_cq cq;
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ bool doorbell;
- /* write@xmit, read@completion */
- struct {
- struct mlx5e_dma_info *di;
- bool doorbell;
- bool redirect_flush;
- } db;
+ struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
+ struct mlx5e_xdpsq_stats *stats;
+ struct {
+ struct mlx5e_xdp_info *xdpi;
+ } db;
void __iomem *uar_map;
u32 sqn;
struct device *pdev;
__be32 mkey_be;
u8 min_inline_mode;
unsigned long state;
+ unsigned int hw_mtu;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -459,11 +467,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
-struct mlx5e_dma_info {
- struct page *page;
- dma_addr_t addr;
-};
-
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info *di;
u32 offset;
@@ -566,7 +569,6 @@ struct mlx5e_rq {
/* XDP */
struct bpf_prog *xdp_prog;
- unsigned int hw_mtu;
struct mlx5e_xdpsq xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
@@ -595,6 +597,9 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
+ /* XDP_REDIRECT */
+ struct mlx5e_xdpsq xdpsq;
+
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
struct mlx5e_ch_stats *stats;
@@ -617,6 +622,8 @@ struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
+ struct mlx5e_xdpsq_stats rq_xdpsq;
+ struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;
enum mlx5e_traffic_types {
@@ -647,11 +654,6 @@ enum {
MLX5E_STATE_DESTROYING,
};
-struct mlx5e_vxlan_db {
- spinlock_t lock; /* protect vxlan table */
- struct radix_tree_root tree;
-};
-
struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
struct mlx5_flow_handle *rule;
@@ -809,7 +811,6 @@ struct mlx5e_priv {
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
- struct mlx5e_vxlan_db vxlan;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -876,14 +877,13 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -892,7 +892,6 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
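The reshuffled struct mlx5e_xdpsq groups fields by writer: the completion
path dirties cc/redirect_flush, the xmit path dirties pc/doorbell, and the
____cacheline_aligned_in_smp on pc pushes the two groups onto separate
cachelines so producer and consumer do not false-share. The same layout
idiom, reduced to its essentials with illustrative names:

	struct sq_counters {	/* illustrative, not the driver struct */
		/* dirtied by the completion (consumer) path */
		u16 cc;
		bool flush;

		/* dirtied by the xmit (producer) path; the alignment puts
		 * pc on a fresh cacheline, away from the consumer fields
		 */
		u16 pc ____cacheline_aligned_in_smp;
		bool doorbell;
	};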
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
new file mode 100644
index 000000000000..1881468dbcfa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf_trace.h>
+#include "en/xdp.h"
+
+static inline bool
+mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
+ struct xdp_buff *xdp)
+{
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpi.xdpf))
+ return false;
+ xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
+ dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, PCI_DMA_TODEVICE);
+ xdpi.di = *di;
+
+ return mlx5e_xmit_xdp_frame(sq, &xdpi);
+}
+
+/* returns true if packet was consumed by xdp */
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
+{
+ struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct xdp_buff xdp;
+ u32 act;
+ int err;
+
+ if (!prog)
+ return false;
+
+ xdp.data = va + *rx_headroom;
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + *len;
+ xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ *rx_headroom = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+ return false;
+ case XDP_TX:
+ if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
+ return true;
+ case XDP_REDIRECT:
+		/* When XDP is enabled, the page refcount is 1 here */
+ err = xdp_do_redirect(rq->netdev, &xdp, prog);
+ if (unlikely(err))
+ goto xdp_abort;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+ rq->xdpsq.redirect_flush = true;
+ mlx5e_page_dma_unmap(rq, di);
+ rq->stats->xdp_redirect++;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+xdp_abort:
+ trace_xdp_exception(rq->netdev, prog, act);
+ case XDP_DROP:
+ rq->stats->xdp_drop++;
+ return true;
+ }
+}
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg = wqe->data;
+
+ struct xdp_frame *xdpf = xdpi->xdpf;
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ unsigned int dma_len = xdpf->len;
+
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+
+ prefetchw(wqe);
+
+ if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
+ stats->err++;
+ return false;
+ }
+
+ if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
+ if (sq->doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->doorbell = false;
+ }
+ stats->full++;
+ return false;
+ }
+
+ cseg->fm_ce_se = 0;
+
+ /* copy the inline part if required */
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ dma_len -= MLX5E_XDP_MIN_INLINE;
+ dma_addr += MLX5E_XDP_MIN_INLINE;
+ dseg++;
+ }
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+
+	/* move the page reference to the sq's responsibility,
+	 * and mark it so it is not put back in the page cache.
+	 */
+ sq->db.xdpi[pi] = *xdpi;
+ sq->pc++;
+
+ sq->doorbell = true;
+
+ stats->xmit++;
+ return true;
+}
+
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_xdpsq *sq;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_xdpsq, cq);
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return false;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return false;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ i = 0;
+ do {
+ u16 wqe_counter;
+ bool last_wqe;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ last_wqe = (sqcc == wqe_counter);
+ sqcc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, true);
+ }
+ } while (!last_wqe);
+ } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
+ sq->stats->cqes += i;
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_rq *rq;
+ bool is_redirect;
+
+ is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
+ rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+
+ while (sq->cc != sq->pc) {
+ u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+
+ sq->cc++;
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi->xdpf);
+ dma_unmap_single(sq->pdev, xdpi->dma_addr,
+ xdpi->xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi->di, false);
+ }
+ }
+}
+
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_xdpsq *sq;
+ int drops = 0;
+ int sq_num;
+ int i;
+
+ if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ sq_num = smp_processor_id();
+
+ if (unlikely(sq_num >= priv->channels.num))
+ return -ENXIO;
+
+ sq = &priv->channels.c[sq_num]->xdpsq;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ struct mlx5e_xdp_info xdpi;
+
+ xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ continue;
+ }
+
+ xdpi.xdpf = xdpf;
+
+ if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ mlx5e_xmit_xdp_doorbell(sq);
+
+ return n - drops;
+}
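Note the return convention mlx5e_xdp_xmit() implements: a negative errno
means no frame was consumed, otherwise the function returns how many
frames it accepted and has already freed the rejected ones itself via
xdp_return_frame_rx_napi(). A hypothetical caller sketch under that
convention:

	/* Hypothetical caller, not kernel core code. */
	static void xmit_batch(struct net_device *dev,
			       struct xdp_frame **frames, int n)
	{
		int sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames,
							 XDP_XMIT_FLUSH);
		int i;

		if (sent < 0) {
			/* errno: nothing was consumed, free the batch */
			for (i = 0; i < n; i++)
				xdp_return_frame_rx_napi(frames[i]);
			return;
		}
		/* n - sent frames were dropped and freed by the driver */
	}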
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
new file mode 100644
index 000000000000..6dfab045925f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_XDP_H__
+#define __MLX5_EN_XDP_H__
+
+#include "en.h"
+
+#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
+ MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_TX_DS_COUNT \
+ ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
+
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
+int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+#endif
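MLX5E_XDP_MAX_MTU is the largest MTU for which an XDP frame still fits
linearly in one page once headroom and the skb_shared_info tailroom are
carved out; it is the bound that mlx5e_change_mtu() and mlx5e_xdp_allowed()
enforce in en_main.c. Roughly, assuming 4 KiB pages (figures illustrative):

	/* MLX5E_XDP_MAX_MTU = PAGE_SIZE - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)
	 *   XDP_PACKET_HEADROOM                      = 256
	 *   aligned sizeof(struct skb_shared_info)  ~= 320
	 * leaving about 4096 - 256 - 320 ~= 3520 bytes of payload, i.e.
	 * every XDP frame stays linear within a single page.
	 */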
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 75e4308ba786..d258bb679271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
- if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
- break;
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
arfs_rule->filter_id)) {
hlist_del_init(&arfs_rule->hlist);
hlist_add_head(&arfs_rule->hlist, &del_list);
+ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+ break;
}
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb->protocol != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
if (!arfs_t)
return -EPROTONOSUPPORT;
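The quota move in arfs_may_expire_flow() changes what
MLX5E_ARFS_EXPIRY_QUOTA counts: previously the scan aborted after
inspecting that many rules, expired or not, so a table full of live flows
could starve expiry; now only rules actually queued for deletion consume
quota:

	/* Effect of the move (illustrative):
	 *   before: quota counted every rule visited -- the walk could
	 *           stop after QUOTA live entries without expiring any
	 *   after:  quota counts only rules moved to del_list, so expiry
	 *           always makes progress
	 */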
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0a52f31fef37..e33afa8d2417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
}
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
- struct ieee_ets *ets)
+ struct ieee_ets *ets,
+ bool zero_sum_allowed)
{
bool have_ets_tc = false;
int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
if (have_ets_tc && bw_sum != 100) {
- netdev_err(netdev,
- "Failed to validate ETS: BW sum is illegal\n");
+		if (bw_sum || !zero_sum_allowed)
+ netdev_err(netdev,
+ "Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
}
return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- err = mlx5e_dbcnl_validate_ets(netdev, ets);
+ err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
if (err)
return err;
@@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.prio_tc[i]);
}
- err = mlx5e_dbcnl_validate_ets(netdev, &ets);
- if (err) {
- netdev_err(netdev,
- "%s, Failed to validate ETS: %d\n", __func__, err);
+ err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+ if (err)
goto out;
- }
err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
if (err) {
@@ -1173,6 +1172,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
+
if (!MLX5_DSCP_SUPPORTED(mdev))
return 0;
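Two small semantic changes here: trust_state now gets a well-defined
MLX5_QPTS_TRUST_PCP default even on devices without DSCP support, and the
zero_sum_allowed parameter lets the setall path accept an all-zero ETS
bandwidth vector (a common firmware default) without logging, while the
strict ieee_setets path still rejects it. The validation outcomes, in
short:

	/* mlx5e_dbcnl_validate_ets() with have_ets_tc == true:
	 *   bw_sum == 100                   -> valid
	 *   bw_sum == 0 && zero_sum_allowed -> -EINVAL, no log (setall)
	 *   any other bw_sum                -> -EINVAL, logged
	 */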
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dccde18f6170..a2fb21ca5767 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,8 +45,9 @@
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
#include "en/port.h"
+#include "en/xdp.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -96,14 +97,19 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
{
- if (!params->xdp_prog) {
- u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ u32 frag_sz;
- return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
- }
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
- return PAGE_SIZE;
+ if (params->xdp_prog && frag_sz < PAGE_SIZE)
+ frag_sz = PAGE_SIZE;
+
+ return frag_sz;
}
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
@@ -485,7 +491,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -877,7 +882,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (rq->mpwqe.umr_in_progress)
- mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+ rq->dealloc_wqe(rq, wq->head);
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
@@ -963,16 +968,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kvfree(sq->db.di);
+ kvfree(sq->db.xdpi);
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)),
- GFP_KERNEL, numa);
- if (!sq->db.di) {
+ sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
+ GFP_KERNEL, numa);
+ if (!sq->db.xdpi) {
mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
}
@@ -983,7 +988,8 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
@@ -995,6 +1001,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->stats = is_redirect ?
+ &c->priv->channel_stats[c->ix].xdpsq :
+ &c->priv->channel_stats[c->ix].rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1524,7 +1534,8 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
- struct mlx5e_xdpsq *sq)
+ struct mlx5e_xdpsq *sq,
+ bool is_redirect)
{
unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
struct mlx5e_create_sq_param csp = {};
@@ -1532,7 +1543,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
int err;
int i;
- err = mlx5e_alloc_xdpsq(c, params, param, sq);
+ err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
if (err)
return err;
@@ -1541,6 +1552,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ if (is_redirect)
+ set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
@@ -1923,10 +1936,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+ if (err)
+ goto err_close_xdp_tx_cqs;
+
/* XDP SQ CQ params are same as normal TXQ sq CQ params */
err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
&cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
@@ -1943,7 +1960,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq;
- err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
+ err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
if (err)
goto err_close_sqs;
@@ -1951,9 +1968,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_xdp_sq;
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
+ if (err)
+ goto err_close_rq;
+
*cp = c;
return 0;
+
+err_close_rq:
+ mlx5e_close_rq(&c->rq);
+
err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -1972,6 +1997,9 @@ err_disable_napi:
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
+err_close_xdp_tx_cqs:
+ mlx5e_close_cq(&c->xdpsq.cq);
+
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
@@ -2006,6 +2034,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
+ mlx5e_close_xdpsq(&c->xdpsq);
mlx5e_close_rq(&c->rq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -2015,6 +2044,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (c->xdp)
mlx5e_close_cq(&c->rq.xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_cq(&c->xdpsq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
@@ -2944,7 +2974,7 @@ int mlx5e_open(struct net_device *netdev)
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
- if (mlx5e_vxlan_allowed(priv->mdev))
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
udp_tunnel_get_rx_info(netdev);
return err;
@@ -3707,6 +3737,14 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
+ if (params->xdp_prog &&
+ !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
+ new_mtu, MLX5E_XDP_MAX_MTU);
+ err = -EINVAL;
+ goto out;
+ }
+
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
@@ -3716,7 +3754,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (!reset) {
params->sw_mtu = new_mtu;
- set_mtu_cb(priv);
+ if (set_mtu_cb)
+ set_mtu_cb(priv);
netdev->mtu = params->sw_mtu;
goto out;
}
@@ -3931,6 +3970,57 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
}
#endif
+struct mlx5e_vxlan_work {
+ struct work_struct work;
+ struct mlx5e_priv *priv;
+ u16 port;
+};
+
+static void mlx5e_vxlan_add_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_add_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_del_work(struct work_struct *work)
+{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
+ u16 port = vxlan_work->port;
+
+ mutex_lock(&priv->state_lock);
+ mlx5_vxlan_del_port(priv->mdev->vxlan, port);
+ mutex_unlock(&priv->state_lock);
+ kfree(vxlan_work);
+}
+
+static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
+{
+ struct mlx5e_vxlan_work *vxlan_work;
+
+ vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+ if (!vxlan_work)
+ return;
+
+ if (add)
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
+ else
+ INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
+
+ vxlan_work->priv = priv;
+ vxlan_work->port = port;
+ queue_work(priv->wq, &vxlan_work->work);
+}
+
static void mlx5e_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
@@ -3939,10 +4029,10 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -3953,10 +4043,10 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (!mlx5e_vxlan_allowed(priv->mdev))
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
- mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
+ mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
@@ -3987,7 +4077,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
port = be16_to_cpu(udph->dest);
/* Verify if UDP port is being offloaded by HW */
- if (mlx5e_vxlan_lookup_port(priv, port))
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
}
@@ -4094,26 +4184,47 @@ static void mlx5e_tx_timeout(struct net_device *dev)
queue_work(priv->wq, &priv->tx_timeout_work);
}
+static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_channels new_channels = {};
+
+ if (priv->channels.params.lro_en) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ return -EINVAL;
+ }
+
+ if (MLX5_IPSEC_DEV(priv->mdev)) {
+ netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ return -EINVAL;
+ }
+
+ new_channels.params = priv->channels.params;
+ new_channels.params.xdp_prog = prog;
+
+ if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
+ new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct bpf_prog *old_prog;
- int err = 0;
bool reset, was_opened;
+ int err = 0;
int i;
mutex_lock(&priv->state_lock);
- if ((netdev->features & NETIF_F_LRO) && prog) {
- netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
- err = -EINVAL;
- goto unlock;
- }
-
- if ((netdev->features & NETIF_F_HW_ESP) && prog) {
- netdev_warn(netdev, "can't set XDP with IPSec offload\n");
- err = -EINVAL;
- goto unlock;
+ if (prog) {
+ err = mlx5e_xdp_allowed(priv, prog);
+ if (err)
+ goto unlock;
}
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -4242,6 +4353,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
+ .ndo_xdp_xmit = mlx5e_xdp_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
@@ -4537,7 +4649,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4545,7 +4657,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
- if (mlx5e_vxlan_allowed(mdev)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -4656,14 +4768,12 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
- mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_vxlan_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1d5295ee863c..15d8ae28c040 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
@@ -46,6 +45,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
+#include "en/xdp.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -239,8 +239,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
return 0;
}
-static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}
@@ -277,10 +276,11 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
}
static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *frag)
+ struct mlx5e_wqe_frag_info *frag,
+ bool recycle)
{
if (frag->last_in_page)
- mlx5e_page_release(rq, frag->di, true);
+ mlx5e_page_release(rq, frag->di, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -308,25 +308,26 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
free_frags:
while (--i >= 0)
- mlx5e_put_rx_frag(rq, --frag);
+ mlx5e_put_rx_frag(rq, --frag, true);
return err;
}
static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_wqe_frag_info *wi)
+ struct mlx5e_wqe_frag_info *wi,
+ bool recycle)
{
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
- mlx5e_put_rx_frag(rq, wi);
+ mlx5e_put_rx_frag(rq, wi, recycle);
}
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, false);
}
static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
@@ -396,7 +397,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
}
}
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
+static void
+mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
const bool no_xdp_xmit =
bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
@@ -405,7 +407,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release(rq, &dma_info[i], true);
+ mlx5e_page_release(rq, &dma_info[i], recycle);
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
@@ -505,8 +507,8 @@ err_unmap:
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-
- mlx5e_free_rx_mpwqe(rq, wi);
+	/* Don't recycle: this function is called on rq/netdev close */
+ mlx5e_free_rx_mpwqe(rq, wi, false);
}
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
@@ -847,135 +849,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
-
- wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
-}
-
-static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- const struct xdp_buff *xdp)
-{
- struct mlx5e_xdpsq *sq = &rq->xdpsq;
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
-
- ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
- dma_addr_t dma_addr = di->addr + data_offset;
- unsigned int dma_len = xdp->data_end - xdp->data;
-
- struct mlx5e_rq_stats *stats = rq->stats;
-
- prefetchw(wqe);
-
- if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
- stats->xdp_drop++;
- return false;
- }
-
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
- if (sq->db.doorbell) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- sq->db.doorbell = false;
- }
- stats->xdp_tx_full++;
- return false;
- }
-
- dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
-
- cseg->fm_ce_se = 0;
-
- dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
-
- /* copy the inline part if required */
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
- eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
- dma_len -= MLX5E_XDP_MIN_INLINE;
- dma_addr += MLX5E_XDP_MIN_INLINE;
- dseg++;
- }
-
- /* write the dma part */
- dseg->addr = cpu_to_be64(dma_addr);
- dseg->byte_count = cpu_to_be32(dma_len);
-
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-
- /* move page to reference to sq responsibility,
- * and mark so it's not put back in page-cache.
- */
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
- sq->db.di[pi] = *di;
- sq->pc++;
-
- sq->db.doorbell = true;
-
- stats->xdp_tx++;
- return true;
-}
-
-/* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len)
-{
- struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
- struct xdp_buff xdp;
- u32 act;
- int err;
-
- if (!prog)
- return false;
-
- xdp.data = va + *rx_headroom;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.data_hard_start = va;
- xdp.rxq = &rq->xdp_rxq;
-
- act = bpf_prog_run_xdp(prog, &xdp);
- switch (act) {
- case XDP_PASS:
- *rx_headroom = xdp.data - xdp.data_hard_start;
- *len = xdp.data_end - xdp.data;
- return false;
- case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
- trace_xdp_exception(rq->netdev, prog, act);
- return true;
- case XDP_REDIRECT:
- /* When XDP enabled then page-refcnt==1 here */
- err = xdp_do_redirect(rq->netdev, &xdp, prog);
- if (!err) {
- __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
- rq->xdpsq.db.redirect_flush = true;
- mlx5e_page_dma_unmap(rq, di);
- }
- return true;
- default:
- bpf_warn_invalid_xdp_action(act);
- case XDP_ABORTED:
- trace_xdp_exception(rq->netdev, prog, act);
- case XDP_DROP:
- rq->stats->xdp_drop++;
- return true;
- }
-}
-
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
@@ -1113,7 +986,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1155,7 +1028,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
@@ -1226,6 +1099,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
+ prefetchw(va); /* xdp_frame data area */
prefetch(data);
rcu_read_lock();
@@ -1292,7 +1166,7 @@ mpwrq_cqe_out:
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
- mlx5e_free_rx_mpwqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
@@ -1328,14 +1202,14 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
- if (xdpsq->db.doorbell) {
+ if (xdpsq->doorbell) {
mlx5e_xmit_xdp_doorbell(xdpsq);
- xdpsq->db.doorbell = false;
+ xdpsq->doorbell = false;
}
- if (xdpsq->db.redirect_flush) {
+ if (xdpsq->redirect_flush) {
xdp_do_flush_map();
- xdpsq->db.redirect_flush = false;
+ xdpsq->redirect_flush = false;
}
mlx5_cqwq_update_db_record(&cq->wq);
@@ -1346,80 +1220,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
return work_done;
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
-{
- struct mlx5e_xdpsq *sq;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
- u16 sqcc;
- int i;
-
- sq = container_of(cq, struct mlx5e_xdpsq, cq);
-
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return false;
-
- cqe = mlx5_cqwq_get_cqe(&cq->wq);
- if (!cqe)
- return false;
-
- rq = container_of(sq, struct mlx5e_rq, xdpsq);
-
- /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
- * otherwise a cq overrun may occur
- */
- sqcc = sq->cc;
-
- i = 0;
- do {
- u16 wqe_counter;
- bool last_wqe;
-
- mlx5_cqwq_pop(&cq->wq);
-
- wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
- do {
- struct mlx5e_dma_info *di;
- u16 ci;
-
- last_wqe = (sqcc == wqe_counter);
-
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- di = &sq->db.di[ci];
-
- sqcc++;
- /* Recycle RX page */
- mlx5e_page_release(rq, di, true);
- } while (!last_wqe);
- } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
-
- rq->stats->xdp_tx_cqe += i;
-
- mlx5_cqwq_update_db_record(&cq->wq);
-
- /* ensure cq space is freed before enabling more cqes */
- wmb();
-
- sq->cc = sqcc;
- return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
-{
- struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
- struct mlx5e_dma_info *di;
- u16 ci;
-
- while (sq->cc != sq->pc) {
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
- di = &sq->db.di[ci];
- sq->cc++;
-
- mlx5e_page_release(rq, di, false);
- }
-}
-
#ifdef CONFIG_MLX5_CORE_IPOIB
#define MLX5_IB_GRH_DGID_OFFSET 24
@@ -1521,7 +1321,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
mlx5_wq_cyc_pop(wq);
}
@@ -1544,19 +1344,19 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (unlikely(!skb)) {
/* a DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
if (unlikely(!skb)) {
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
goto wq_cyc_pop;
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
- mlx5e_free_rx_wqe(rq, wi);
+ mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
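The recycle flag threaded through the RX free paths encodes one rule:
pages released on the datapath may go back into the page cache for reuse,
while pages released on teardown must not, since the cache itself is about
to be destroyed. Summarized:

	/* Recycle rule (illustrative):
	 *   datapath completion -> mlx5e_free_rx_wqe(rq, wi, true)
	 *   rq/netdev teardown  -> mlx5e_free_rx_wqe(rq, wi, false)
	 * (and likewise for mlx5e_free_rx_mpwqe and mlx5e_put_rx_frag)
	 */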
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index c0507fada0be..12fdf5c92b67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,9 +59,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
@@ -73,6 +75,10 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
@@ -128,6 +134,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
+ struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
+ struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
int j;
@@ -141,10 +149,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_redirect += rq_stats->xdp_redirect;
+ s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+ s->rx_xdp_tx_full += xdpsq_stats->full;
+ s->rx_xdp_tx_err += xdpsq_stats->err;
+ s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
@@ -162,7 +172,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
s->ch_aff_change += ch_stats->aff_change;
- s->ch_eq_rearm += ch_stats->eq_rearm;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+ /* xdp redirect */
+ s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+ s->tx_xdp_full += xdpsq_red_stats->full;
+ s->tx_xdp_err += xdpsq_red_stats->err;
+ s->tx_xdp_cqes += xdpsq_red_stats->cqes;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -1126,9 +1141,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
@@ -1168,6 +1181,20 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
+static const struct counter_desc rq_xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
+static const struct counter_desc xdpsq_stats_desc[] = {
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
static const struct counter_desc ch_stats_desc[] = {
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
@@ -1178,6 +1205,8 @@ static const struct counter_desc ch_stats_desc[] = {
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
+#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
@@ -1186,7 +1215,9 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
- (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
+ (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
+ (NUM_RQ_XDPSQ_STATS * max_nch) +
+ (NUM_XDPSQ_STATS * max_nch);
}
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -1200,9 +1231,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ch_stats_desc[j].format, i);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_xdpsq_stats_desc[j].format, i);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1211,6 +1247,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
sq_stats_desc[j].format,
priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ xdpsq_stats_desc[j].format, i);
+
return idx;
}
@@ -1226,11 +1267,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
ch_stats_desc, j);
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
rq_stats_desc, j);
+ for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+ rq_xdpsq_stats_desc, j);
+ }
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
@@ -1239,6 +1285,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
sq_stats_desc, j);
+ for (i = 0; i < max_nch; i++)
+ for (j = 0; j < NUM_XDPSQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+ xdpsq_stats_desc, j);
+
return idx;
}
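
The three hooks above must agree: get_num_stats sizes the ethtool buffers, and fill_strings/fill_stats have to emit entries in exactly the same channel-then-descriptor order, or the reported names and values shift against each other. A minimal userspace sketch of that contract, with hypothetical two-entry descriptor tables (all names illustrative, not the driver's):

#include <stdio.h>
#include <stddef.h>

#define ETH_GSTRING_LEN 32

struct counter_desc { const char *format; size_t offset; };
struct rq_stats { unsigned long long packets, bytes; };

static const struct counter_desc rq_desc[] = {
	{ "rx%d_packets", offsetof(struct rq_stats, packets) },
	{ "rx%d_bytes",   offsetof(struct rq_stats, bytes) },
};
#define NUM_RQ_STATS (int)(sizeof(rq_desc) / sizeof(rq_desc[0]))

/* buffer size: one entry per (channel, counter) pair */
static int get_num_stats(int nch) { return NUM_RQ_STATS * nch; }

/* strings (and values) must walk channels and descriptors identically */
static int fill_strings(char *data, int nch)
{
	int idx = 0;

	for (int i = 0; i < nch; i++)
		for (int j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_desc[j].format, i);
	return idx;
}

int main(void)
{
	char names[2 * 2 * ETH_GSTRING_LEN];
	int n = fill_strings(names, 2);

	for (int k = 0; k < n; k++)
		printf("%s\n", names + k * ETH_GSTRING_LEN);
	return 0;
}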
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index fc3f66003edd..a4c035aedd46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,8 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
@@ -70,9 +72,11 @@ struct mlx5e_sw_stats {
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
- u64 rx_xdp_tx;
- u64 rx_xdp_tx_cqe;
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_tx_xmit;
u64 rx_xdp_tx_full;
+ u64 rx_xdp_tx_err;
+ u64 rx_xdp_tx_cqe;
u64 tx_csum_none;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
@@ -84,6 +88,10 @@ struct mlx5e_sw_stats {
u64 tx_queue_wake;
u64 tx_udp_seg_rem;
u64 tx_cqe_err;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_full;
+ u64 tx_xdp_err;
+ u64 tx_xdp_cqes;
u64 rx_wqe_err;
u64 rx_mpwqe_filler_cqes;
u64 rx_mpwqe_filler_strides;
@@ -178,9 +186,7 @@ struct mlx5e_rq_stats {
u64 lro_bytes;
u64 removed_vlan_packets;
u64 xdp_drop;
- u64 xdp_tx;
- u64 xdp_tx_cqe;
- u64 xdp_tx_full;
+ u64 xdp_redirect;
u64 wqe_err;
u64 mpwqe_filler_cqes;
u64 mpwqe_filler_strides;
@@ -225,6 +231,14 @@ struct mlx5e_sq_stats {
u64 cqe_err;
};
+struct mlx5e_xdpsq_stats {
+ u64 xmit;
+ u64 full;
+ u64 err;
+ /* dirtied @completion */
+ u64 cqes ____cacheline_aligned_in_smp;
+};
+
struct mlx5e_ch_stats {
u64 events;
u64 poll;
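
Why cqes gets its own cache line in mlx5e_xdpsq_stats: xmit/full/err are dirtied by the transmit path while cqes is dirtied at completion time, so splitting them avoids false sharing between the two CPUs involved. A hedged, self-contained illustration, with a generic alignment attribute standing in for the kernel's ____cacheline_aligned_in_smp:

#include <stdio.h>
#include <stddef.h>

#define CACHELINE_ALIGNED __attribute__((aligned(64))) /* stand-in macro */

struct xdpsq_stats_sketch {
	/* dirtied by the producer (xmit path) */
	unsigned long long xmit;
	unsigned long long full;
	unsigned long long err;
	/* dirtied by the consumer (completion path): starts a new line */
	unsigned long long cqes CACHELINE_ALIGNED;
};

int main(void)
{
	printf("cqes offset: %zu\n",
	       offsetof(struct xdpsq_stats_sketch, cqes)); /* 64, not 24 */
	return 0;
}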
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index e9888d6c1f7c..c28fe469b04a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "vxlan.h"
+#include "lib/vxlan.h"
#include "fs_core.h"
#include "en/port.h"
@@ -1124,16 +1124,12 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
goto vxlan_match_offload_err;
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
@@ -1211,6 +1207,26 @@ vxlan_match_offload_err:
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IP,
+ f->mask);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ }
+
/* Enforce DMAC when offloading incoming tunneled flows.
* Flow counters require a match on the DMAC.
*/
@@ -1259,7 +1275,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
- BIT(FLOW_DISSECTOR_KEY_IP))) {
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -2004,6 +2021,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
else
actions = flow->nic_attr->action;
+ if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+ !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ return false;
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec, exts);
@@ -2125,7 +2146,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi4 *fl4,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -2149,7 +2170,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
else
*out_dev = rt->dst.dev;
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
if (!n)
@@ -2178,7 +2200,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi6 *fl6,
struct neighbour **out_n,
- int *out_ttl)
+ u8 *out_ttl)
{
struct neighbour *n = NULL;
struct dst_entry *dst;
@@ -2193,7 +2215,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
if (ret < 0)
return ret;
- *out_ttl = ip6_dst_hoplimit(dst);
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
@@ -2217,7 +2240,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
__be32 daddr,
__be32 saddr,
__be16 udp_dst_port,
@@ -2237,6 +2260,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
ip->daddr = daddr;
ip->saddr = saddr;
+ ip->tos = tos;
ip->ttl = ttl;
ip->protocol = IPPROTO_UDP;
ip->version = 0x4;
@@ -2250,7 +2274,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev,
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
char buf[], int encap_size,
unsigned char h_dest[ETH_ALEN],
- int ttl,
+ u8 tos, u8 ttl,
struct in6_addr *daddr,
struct in6_addr *saddr,
__be16 udp_dst_port,
@@ -2267,7 +2291,7 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev,
ether_addr_copy(eth->h_source, out_dev->dev_addr);
eth->h_proto = htons(ETH_P_IPV6);
- ip6_flow_hdr(ip6h, 0, 0);
+ ip6_flow_hdr(ip6h, tos, 0);
/* the HW fills up ipv6 payload len */
ip6h->nexthdr = IPPROTO_UDP;
ip6h->hop_limit = ttl;
@@ -2289,9 +2313,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi4 fl4 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int ttl, err;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv4_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2312,6 +2336,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = -EOPNOTSUPP;
goto free_encap;
}
+
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl4.flowi4_tos = tun_key->tos;
fl4.daddr = tun_key->u.ipv4.dst;
fl4.saddr = tun_key->u.ipv4.src;
@@ -2346,7 +2374,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv4(out_dev, encap_header,
- ipv4_encap_size, e->h_dest, ttl,
+ ipv4_encap_size, e->h_dest, tos, ttl,
fl4.daddr,
fl4.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2394,9 +2422,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
struct net_device *out_dev;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
+ u8 nud_state, tos, ttl;
char *encap_header;
- int err, ttl = 0;
- u8 nud_state;
+ int err;
if (max_encap_size < ipv6_encap_size) {
mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
@@ -2418,6 +2446,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
+ tos = tun_key->tos;
+ ttl = tun_key->ttl;
+
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
@@ -2452,7 +2483,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
gen_vxlan_header_ipv6(out_dev, encap_header,
- ipv6_encap_size, e->h_dest, ttl,
+ ipv6_encap_size, e->h_dest, tos, ttl,
&fl6.daddr,
&fl6.saddr, tun_key->tp_dst,
tunnel_id_to_key32(tun_key->tun_id));
@@ -2498,11 +2529,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
- REP_ETH);
- struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
- struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
@@ -2522,7 +2549,7 @@ vxlan_encap_offload_err:
return -EOPNOTSUPP;
}
- if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
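
The ENC_IP match and the new encap headers both treat the tos byte as DSCP in the upper six bits and ECN in the lower two, which is what the "& 0x3" / ">> 2" pair above implements. A worked example with an illustrative value:

#include <stdio.h>

int main(void)
{
	unsigned char tos = 0xba;	/* example traffic-class byte */

	printf("ecn  = 0x%x\n", tos & 0x3);	/* 0x2: ECT(0) */
	printf("dscp = 0x%x\n", tos >> 2);	/* 0x2e: EF (46) */
	return 0;
}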
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 9106ea45e3cb..ae73ea992845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -66,22 +66,21 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
}
}
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
+{
+ return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+}
+
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
dma_addr_t addr,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
- sq->db.dma_fifo[i].addr = addr;
- sq->db.dma_fifo[i].size = size;
- sq->db.dma_fifo[i].type = map_type;
- sq->dma_fifo_pc++;
-}
-
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
-{
- return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+ dma->addr = addr;
+ dma->size = size;
+ dma->type = map_type;
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
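
The refactor moves mlx5e_dma_get() above mlx5e_dma_push() so the push path can reuse it; the underlying technique is a power-of-two ring in which a free-running producer counter is masked into a slot index, with no division and no explicit wrap check. A self-contained sketch under that assumption (names hypothetical):

#include <stdio.h>

struct dma_slot { unsigned long addr; unsigned int size; };

struct dma_fifo {
	struct dma_slot slots[8];	/* size must be a power of two */
	unsigned int mask;		/* size - 1 */
	unsigned int pc;		/* free-running producer counter */
};

/* masking wraps the counter into a slot without a division */
static struct dma_slot *fifo_get(struct dma_fifo *f, unsigned int i)
{
	return &f->slots[i & f->mask];
}

static void fifo_push(struct dma_fifo *f, unsigned long addr,
		      unsigned int size)
{
	struct dma_slot *s = fifo_get(f, f->pc++);

	s->addr = addr;
	s->size = size;
}

int main(void)
{
	struct dma_fifo f = { .mask = 7 };

	for (int i = 0; i < 10; i++)
		fifo_push(&f, 0x1000 + i, 64);
	/* push #9 wrapped into slot 9 & 7 == 1 */
	printf("slot for push 9: 0x%lx\n", fifo_get(&f, 9)->addr);
	return 0;
}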
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 4e1f99a98d5d..85d517360157 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,6 +32,7 @@
#include <linux/irq.h>
#include "en.h"
+#include "en/xdp.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
@@ -84,6 +85,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
@@ -116,6 +119,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
+ mlx5e_cq_arm(&c->xdpsq.cq);
return work_done;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b79d74860a30..40dba9e8af92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1696,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
int vport_num;
int err;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
esw_info(dev,
@@ -1765,7 +1765,7 @@ abort:
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
- if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
+ if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
return;
esw_info(esw->dev, "cleanup\n");
@@ -2216,6 +2216,6 @@ free_out:
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
- return esw->mode;
+ return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f880ffc9acd6..a21df24b695e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1889,7 +1889,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
- if (dest)
+ if (dest_num)
return ERR_PTR(-EINVAL);
mutex_lock(&root->chain_lock);
next_ft = find_next_chained_ft(prio);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index af3bb2f7a504..b7c21eb21a21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ u16 max_mtu;
/* priv init */
priv->mdev = mdev;
@@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
priv->ppriv = ppriv;
mutex_init(&priv->state_lock);
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ netdev->mtu = max_mtu;
+
mlx5e_build_nic_params(mdev, &priv->channels.params,
profile->max_nch(mdev), netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 1e062e6b2587..3f767cde4c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
+ u64 overflow_cycles;
u64 ns;
u64 frac = 0;
u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
+	 * The period is calculated as the minimum between the max HW cycle
+	 * count (the clock source mask) and the max number of cycles whose
+	 * product with the clock multiplier still fits in 64 bits.
*/
- ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+ overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+ ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
frac, &frac);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ do_div(ns, NSEC_PER_SEC / HZ);
clock->overflow_period = ns;
mdev->clock_info_page = alloc_page(GFP_KERNEL);
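
A worked example of the overflow bound introduced above: the watchdog must run before either the 64-bit (cycles * mult) product or the hardware cycle counter wraps, so the cycle budget is the smaller of the two limits, each halved for safety. The multiplier and counter width below are illustrative, not taken from any particular device:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mult = 1ULL << 21;		/* example cyclecounter multiplier */
	uint64_t mask = (1ULL << 41) - 1;	/* example 41-bit HW counter */
	uint64_t overflow_cycles;

	overflow_cycles = (~0ULL >> 1) / mult;	/* 64-bit product bound */
	if (overflow_cycles > mask >> 1)	/* HW wrap bound */
		overflow_cycles = mask >> 1;

	printf("budget: %llu cycles\n",
	       (unsigned long long)overflow_cycles);
	return 0;
}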
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
new file mode 100644
index 000000000000..9a8fd762167b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "vxlan.h"
+
+struct mlx5_vxlan {
+ struct mlx5_core_dev *mdev;
+ spinlock_t lock; /* protect vxlan table */
+	/* max_num_ports is usually 4; 16 buckets are more than enough */
+ DECLARE_HASHTABLE(htable, 4);
+ int num_ports;
+ struct mutex sync_lock; /* sync add/del port HW operations */
+};
+
+struct mlx5_vxlan_port {
+ struct hlist_node hlist;
+ atomic_t refcount;
+ u16 udp_port;
+};
+
+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
+}
+
+static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
+ MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+
+ MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static struct mlx5_vxlan_port*
+mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
+ if (vxlanp->udp_port == port)
+ return vxlanp;
+ }
+
+ return NULL;
+}
+
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return NULL;
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ return vxlanp;
+}
+
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ int ret = -ENOSPC;
+
+ vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
+ if (vxlanp) {
+ atomic_inc(&vxlanp->refcount);
+ return 0;
+ }
+
+ mutex_lock(&vxlan->sync_lock);
+ if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
+ mlx5_core_info(vxlan->mdev,
+ "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
+ port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
+ if (ret)
+ goto unlock;
+
+ vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
+ if (!vxlanp) {
+ ret = -ENOMEM;
+ goto err_delete_port;
+ }
+
+ vxlanp->udp_port = port;
+ atomic_set(&vxlanp->refcount, 1);
+
+ spin_lock_bh(&vxlan->lock);
+ hash_add(vxlan->htable, &vxlanp->hlist, port);
+ spin_unlock_bh(&vxlan->lock);
+
+ vxlan->num_ports++;
+ mutex_unlock(&vxlan->sync_lock);
+ return 0;
+
+err_delete_port:
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+
+unlock:
+ mutex_unlock(&vxlan->sync_lock);
+ return ret;
+}
+
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ bool remove = false;
+ int ret = 0;
+
+ mutex_lock(&vxlan->sync_lock);
+
+ spin_lock_bh(&vxlan->lock);
+ vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+ if (!vxlanp) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (atomic_dec_and_test(&vxlanp->refcount)) {
+ hash_del(&vxlanp->hlist);
+ remove = true;
+ }
+
+out_unlock:
+ spin_unlock_bh(&vxlan->lock);
+
+ if (remove) {
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+ kfree(vxlanp);
+ vxlan->num_ports--;
+ }
+
+ mutex_unlock(&vxlan->sync_lock);
+
+ return ret;
+}
+
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_vxlan *vxlan;
+
+ if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
+ return ERR_PTR(-ENOTSUPP);
+
+ vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
+ if (!vxlan)
+ return ERR_PTR(-ENOMEM);
+
+ vxlan->mdev = mdev;
+ mutex_init(&vxlan->sync_lock);
+ spin_lock_init(&vxlan->lock);
+ hash_init(vxlan->htable);
+
+ /* Hardware adds 4789 by default */
+ mlx5_vxlan_add_port(vxlan, 4789);
+
+ return vxlan;
+}
+
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
+{
+ struct mlx5_vxlan_port *vxlanp;
+ struct hlist_node *tmp;
+ int bkt;
+
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+	/* Lockless since we are the only hash table consumers */
+ hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
+ hash_del(&vxlanp->hlist);
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
+ kfree(vxlanp);
+ }
+
+ kfree(vxlan);
+}
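
A hedged usage sketch of the API this file introduces; it assumes an already-initialized mlx5_core_dev, ignores return codes for brevity, and the include path is illustrative. It only calls functions defined in this patch:

#include <linux/err.h>
#include "lib/vxlan.h"

static void vxlan_offload_example(struct mlx5_core_dev *mdev)
{
	struct mlx5_vxlan *vxlan = mlx5_vxlan_create(mdev); /* adds 4789 */

	if (!mlx5_vxlan_allowed(vxlan))
		return;	/* failure reason is encoded in the ERR_PTR */

	mlx5_vxlan_add_port(vxlan, 4790);	/* refcounted per port */
	if (mlx5_vxlan_lookup_port(vxlan, 4790))
		;	/* port is offloaded; safe to program encap rules */
	mlx5_vxlan_del_port(vxlan, 4790);

	mlx5_vxlan_destroy(vxlan);	/* flushes any remaining ports */
}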
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index 5ef6ae7d568a..fd874a30c4d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -33,31 +33,32 @@
#define __MLX5_VXLAN_H__
#include <linux/mlx5/driver.h>
-#include "en.h"
-struct mlx5e_vxlan {
- atomic_t refcount;
- u16 udp_port;
-};
+struct mlx5_vxlan;
+struct mlx5_vxlan_port;
-struct mlx5e_vxlan_work {
- struct work_struct work;
- struct mlx5e_priv *priv;
- sa_family_t sa_family;
- u16 port;
-};
+#ifdef CONFIG_MLX5_CORE_EN
-static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
+static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
{
- return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
- mlx5_core_is_pf(mdev));
+	/* the not-allowed reason is encoded in the vxlan pointer as an
+	 * error returned by mlx5_vxlan_create
+	 */
+ return !IS_ERR_OR_NULL(vxlan);
}
-void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
+int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
+int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+#else
+
+static inline struct mlx5_vxlan*
+mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-ENOTSUPP); }
+static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
+
+#endif
#endif /* __MLX5_VXLAN_H__ */
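
mlx5_vxlan_allowed() leans on a common kernel pattern: the constructor encodes the reason offload is unavailable into the returned pointer, so fast-path callers only need the cheap IS_ERR_OR_NULL() test instead of re-checking device capabilities. A self-contained userspace sketch of the idea, with the err.h macros simplified:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR_OR_NULL(p) \
	(!(p) || (unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

struct table { int dummy; };

static struct table *table_create(bool capable)
{
	if (!capable)
		return ERR_PTR(-EOPNOTSUPP);	/* reason kept in pointer */
	return calloc(1, sizeof(struct table));
}

int main(void)
{
	struct table *t = table_create(false);

	printf("allowed: %d\n", !IS_ERR_OR_NULL(t));	/* 0 */
	return 0;
}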
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6ddbb70e95de..03b9c6733eed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -62,6 +62,7 @@
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
+#include "lib/vxlan.h"
#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -961,6 +962,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_clock(dev);
+ dev->vxlan = mlx5_vxlan_create(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -1004,6 +1007,7 @@ err_mpfs_cleanup:
err_rl_cleanup:
mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1024,6 +1028,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
deleted file mode 100644
index 2f74953e4561..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#include "vxlan.h"
-
-void mlx5e_vxlan_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-
- spin_lock_init(&vxlan_db->lock);
- INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
-}
-
-static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
- MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
-
- MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
- MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
- MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
-
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- spin_unlock_bh(&vxlan_db->lock);
-
- return vxlan;
-}
-
-static void mlx5e_vxlan_add_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- int err;
-
- mutex_lock(&priv->state_lock);
- vxlan = mlx5e_vxlan_lookup_port(priv, port);
- if (vxlan) {
- atomic_inc(&vxlan->refcount);
- goto free_work;
- }
-
- if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
- goto free_work;
-
- vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
- if (!vxlan)
- goto err_delete_port;
-
- vxlan->udp_port = port;
- atomic_set(&vxlan->refcount, 1);
-
- spin_lock_bh(&vxlan_db->lock);
- err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
- spin_unlock_bh(&vxlan_db->lock);
- if (err)
- goto err_free;
-
- goto free_work;
-
-err_free:
- kfree(vxlan);
-err_delete_port:
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-free_work:
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- u16 port = vxlan_work->port;
- struct mlx5e_vxlan *vxlan;
- bool remove = false;
-
- mutex_lock(&priv->state_lock);
- spin_lock_bh(&vxlan_db->lock);
- vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- if (!vxlan)
- goto out_unlock;
-
- if (atomic_dec_and_test(&vxlan->refcount)) {
- radix_tree_delete(&vxlan_db->tree, port);
- remove = true;
- }
-
-out_unlock:
- spin_unlock_bh(&vxlan_db->lock);
-
- if (remove) {
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
- u16 port, int add)
-{
- struct mlx5e_vxlan_work *vxlan_work;
-
- vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
- if (!vxlan_work)
- return;
-
- if (add)
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
- else
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
-
- vxlan_work->priv = priv;
- vxlan_work->port = port;
- vxlan_work->sa_family = sa_family;
- queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
- struct mlx5e_vxlan *vxlan;
- unsigned int port = 0;
-
- /* Lockless since we are the only radix-tree consumers, wq is disabled */
- while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
- port = vxlan->udp_port;
- radix_tree_delete(&vxlan_db->tree, port);
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
- kfree(vxlan);
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index b97bb72b4db4..86478a6b99c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -113,35 +113,45 @@ err_db_free:
return err;
}
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
- struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+ struct mlx5_wq_qp *qp)
{
+ struct mlx5_frag_buf_ctrl *sq_fbc;
struct mlx5_frag_buf *rqb, *sqb;
- rqb = &qp->rq.fbc.frag_buf;
+ rqb = &qp->rq.fbc.frag_buf;
*rqb = *buf;
rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- rqb->npages = 1 << get_order(rqb->size);
+ rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
- sqb = &qp->sq.fbc.frag_buf;
- *sqb = *buf;
- sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- sqb->npages = 1 << get_order(sqb->size);
+ sq_fbc = &qp->sq.fbc;
+ sqb = &sq_fbc->frag_buf;
+ *sqb = *buf;
+ sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
+ sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
sqb->frags += rqb->npages; /* first part is for the rq */
+ if (sq_fbc->strides_offset)
+ sqb->frags--;
}
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u32 sq_strides_offset;
int err;
mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
MLX5_GET(qpc, qpc, log_rq_size),
&wq->rq.fbc);
- mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
- MLX5_GET(qpc, qpc, log_sq_size),
- &wq->sq.fbc);
+
+ sq_strides_offset =
+ ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+ mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+ MLX5_GET(qpc, qpc, log_sq_size),
+ sq_strides_offset,
+ &wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+ mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
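
The npages change above matters because "1 << get_order(size)" rounds up to a power-of-two page count, while DIV_ROUND_UP(size, PAGE_SIZE) counts exactly the pages used; the former overstates how many fragments the SQ sits after. A worked example, with get_order reimplemented in simplified form:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int get_order(unsigned long size)	/* simplified kernel helper */
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 20 * 1024;	/* 20 KiB buffer, 4 KiB pages */

	printf("1 << get_order = %lu pages\n", 1UL << get_order(size)); /* 8 */
	printf("DIV_ROUND_UP   = %lu pages\n",
	       DIV_ROUND_UP(size, PAGE_SIZE));				/* 5 */
	return 0;
}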
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 82827a8d3d67..8a291eb36c64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
depends on IPV6 || IPV6=n
depends on NET_IPGRE || NET_IPGRE=n
depends on IPV6_GRE || IPV6_GRE=n
+ select GENERIC_ALLOCATOR
select PARMAN
select MLXFW
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 4a1069166119..68fa44a41485 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -18,7 +18,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum1_kvdl.o spectrum2_kvdl.o \
spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
- spectrum_acl_atcam.o \
+ spectrum_acl_atcam.o spectrum_acl_erp.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
spectrum_acl.o \
spectrum_flower.o spectrum_cnt.o \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index a4669e79fdf9..66ea256fe560 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -327,12 +327,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
list_add(&resource->list, &block->resource_list);
}
+static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
+{
+ list_del(&resource->list);
+}
+
static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
{
struct mlxsw_afa_resource *resource, *tmp;
list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
- list_del(&resource->list);
resource->destructor(block, resource);
}
}
@@ -565,6 +569,7 @@ static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
+ mlxsw_afa_resource_del(&fwd_entry_ref->resource);
mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
kfree(fwd_entry_ref);
}
@@ -614,6 +619,7 @@ static void
mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_counter *counter)
{
+ mlxsw_afa_resource_del(&counter->resource);
block->afa->ops->counter_index_put(block->afa->ops_priv,
counter->counter_index);
kfree(counter);
@@ -661,8 +667,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
char *oneact;
char *actions;
- if (WARN_ON(block->finished))
- return NULL;
+ if (block->finished)
+ return ERR_PTR(-EINVAL);
if (block->cur_act_index + action_size >
block->afa->max_acts_per_set) {
struct mlxsw_afa_set *set;
@@ -672,7 +678,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
*/
set = mlxsw_afa_set_create(false);
if (!set)
- return NULL;
+ return ERR_PTR(-ENOBUFS);
set->prev = block->cur_set;
block->cur_act_index = 0;
block->cur_set->next = set;
@@ -760,9 +766,9 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
MLXSW_AFA_VLAN_CODE,
MLXSW_AFA_VLAN_SIZE);
- if (!act) {
+ if (IS_ERR(act)) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
- return -ENOBUFS;
+ return PTR_ERR(act);
}
mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
@@ -844,8 +850,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
return 0;
@@ -858,8 +864,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
trap_id);
@@ -874,8 +880,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
trap_id);
@@ -894,6 +900,7 @@ static void
mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_mirror *mirror)
{
+ mlxsw_afa_resource_del(&mirror->resource);
block->afa->ops->mirror_del(block->afa->ops_priv,
mirror->local_in_port,
mirror->span_id,
@@ -946,8 +953,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
@@ -1043,9 +1050,9 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
MLXSW_AFA_FORWARD_SIZE);
- if (!act) {
- err = -ENOBUFS;
+ if (IS_ERR(act)) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
+ err = PTR_ERR(act);
goto err_append_action;
}
mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
@@ -1100,8 +1107,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
{
char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
counter_index);
return 0;
@@ -1176,9 +1183,9 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VIRFWD_CODE,
MLXSW_AFA_VIRFWD_SIZE);
- if (!act) {
+ if (IS_ERR(act)) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
- return -ENOBUFS;
+ return PTR_ERR(act);
}
mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
return 0;
@@ -1248,8 +1255,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_MCROUTER_CODE,
MLXSW_AFA_MCROUTER_SIZE);
- if (!act)
- return -ENOBUFS;
+ if (IS_ERR(act))
+ return PTR_ERR(act);
mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
expected_irif, min_mtu, rmid_valid, kvdl_index);
return 0;
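
The conversion above from NULL to ERR_PTR lets mlxsw_afa_block_append_action() forward the real cause of failure (-EINVAL for a finished block versus -ENOBUFS for a failed set allocation) instead of collapsing everything to -ENOBUFS at the call sites. A self-contained sketch of the pattern, with the err.h macros simplified:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(p) ((long)(p))
#define IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

static char buf[16];

static char *append_action(int finished, int room)
{
	if (finished)
		return ERR_PTR(-EINVAL);	/* distinct cause #1 */
	if (!room)
		return ERR_PTR(-ENOBUFS);	/* distinct cause #2 */
	return buf;
}

int main(void)
{
	char *act = append_action(1, 1);

	if (IS_ERR(act))
		printf("append failed: %ld\n", PTR_ERR(act));	/* -22 */
	return 0;
}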
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 5f8485c7640e..9649b4d9349a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -457,7 +457,7 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask)
+ char *key, char *mask, int block_start, int block_end)
{
char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
@@ -465,7 +465,7 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
enum mlxsw_afk_element element;
int block_index, i;
- for (i = 0; i < key_info->blocks_count; i++) {
+ for (i = block_start; i <= block_end; i++) {
memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
@@ -482,8 +482,10 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
values->storage.mask);
}
- mlxsw_afk->ops->encode_block(block_key, i, key);
- mlxsw_afk->ops->encode_block(block_mask, i, mask);
+ if (key)
+ mlxsw_afk->ops->encode_block(block_key, i, key);
+ if (mask)
+ mlxsw_afk->ops->encode_block(block_mask, i, mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 2ffde915349b..18d9bfed6001 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -259,6 +259,6 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask);
+ char *key, char *mask, int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 596fddfb3850..9f344914c4a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2466,14 +2466,14 @@ MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
-#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96
/* reg_ptce2_flex_key_blocks
* ACL Key.
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_mask
* mask- in the same size as key. A bit that is set directs the TCAM
@@ -2482,7 +2482,7 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
- MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
/* reg_ptce2_flex_action_set
* ACL action set.
@@ -2504,6 +2504,118 @@ static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
}
+/* PERPT - Policy-Engine ERP Table Register
+ * ----------------------------------------
+ * This register adds and removes eRPs from the eRP table.
+ */
+#define MLXSW_REG_PERPT_ID 0x3021
+#define MLXSW_REG_PERPT_LEN 0x80
+
+MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN);
+
+/* reg_perpt_erpt_bank
+ * eRP table bank.
+ * Range 0 .. cap_max_erp_table_banks - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4);
+
+/* reg_perpt_erpt_index
+ * Index to eRP table within the eRP bank.
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8);
+
+enum mlxsw_reg_perpt_key_size {
+ MLXSW_REG_PERPT_KEY_SIZE_2KB,
+ MLXSW_REG_PERPT_KEY_SIZE_4KB,
+ MLXSW_REG_PERPT_KEY_SIZE_8KB,
+ MLXSW_REG_PERPT_KEY_SIZE_12KB,
+};
+
+/* reg_perpt_key_size
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4);
+
+/* reg_perpt_bf_bypass
+ * 0 - The eRP is used only if bloom filter state is set for the given
+ * rule.
+ * 1 - The eRP is used regardless of bloom filter state.
+ * The bypass is an OR condition of the region and the eRP; see PERCR.bf_bypass.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1);
+
+/* reg_perpt_erp_id
+ * eRP ID for use by the rules.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4);
+
+/* reg_perpt_erpt_base_bank
+ * Base eRP table bank, points to head of erp_vector
+ * Range is 0 .. cap_max_erp_table_banks - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4);
+
+/* reg_perpt_erpt_base_index
+ * Base index to eRP table within the eRP bank
+ * Range is 0 .. cap_max_erp_table_bank_size - 1
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8);
+
+/* reg_perpt_erp_index_in_vector
+ * eRP index in the vector.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4);
+
+/* reg_perpt_erp_vector
+ * eRP vector.
+ * Access: OP
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1);
+
+/* reg_perpt_mask
+ * Mask
+ * 0 - A-TCAM will ignore the bit in key
+ * 1 - A-TCAM will compare the bit in key
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_perpt_erp_vector_set(payload, bit, true);
+}
+
+static inline void
+mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
+ enum mlxsw_reg_perpt_key_size key_size, u8 erp_id,
+ u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index,
+ char *mask)
+{
+ MLXSW_REG_ZERO(perpt, payload);
+ mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
+ mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
+ mlxsw_reg_perpt_key_size_set(payload, key_size);
+ mlxsw_reg_perpt_bf_bypass_set(payload, true);
+ mlxsw_reg_perpt_erp_id_set(payload, erp_id);
+ mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
+ mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
+ mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index);
+ mlxsw_reg_perpt_mask_memcpy_to(payload, mask);
+}
+
/* PERAR - Policy-Engine Region Association Register
* -------------------------------------------------
* This register associates a hw region with region_id's. Changing on the fly
@@ -2545,6 +2657,164 @@ static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id,
mlxsw_reg_perar_hw_region_set(payload, hw_region);
}
+/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3
+ * -----------------------------------------------------
+ * This register is a new version of PTCE-V2 in order to support the
+ * A-TCAM. This register is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PTCE3_ID 0x3027
+#define MLXSW_REG_PTCE3_LEN 0xF0
+
+MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN);
+
+/* reg_ptce3_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1);
+
+enum mlxsw_reg_ptce3_op {
+ /* Write operation. Used to write a new entry to the table.
+ * All R/W fields are relevant for new entry. Activity bit is set
+ * for new entries. Write with v = 0 will delete the entry. Must
+ * not be used if an entry exists.
+ */
+ MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0,
+ /* Update operation */
+ MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1,
+ /* Read operation */
+ MLXSW_REG_PTCE3_OP_QUERY_READ = 0,
+};
+
+/* reg_ptce3_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3);
+
+/* reg_ptce3_priority
+ * Priority of the rule. Higher values win.
+ * For Spectrum-2 range is 1..cap_kvd_size - 1
+ * Note: Priority does not have to be unique per rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24);
+
+/* reg_ptce3_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10,
+ MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptce3_flex2_key_blocks
+ * ACL key. The key must be masked according to eRP (if exists) or
+ * according to master mask.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20,
+ MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce3_erp_id
+ * eRP ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4);
+
+/* reg_ptce3_delta_start
+ * Start point of delta_value and delta_mask, in bits. Must not exceed
+ * num_key_blocks * 36 - 8. Reserved when delta_mask = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10);
+
+/* reg_ptce3_delta_mask
+ * Delta mask.
+ * 0 - Ignore relevant bit in delta_value
+ * 1 - Compare relevant bit in delta_value
+ * Delta mask must not be set for reserved fields in the key blocks.
+ * Note: No delta when no eRPs. Thus, for regions with
+ * PERERP.erpt_pointer_valid = 0 the delta mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8);
+
+/* reg_ptce3_delta_value
+ * Delta value.
+ * Bits which are masked by delta_mask must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8);
+
+/* reg_ptce3_prune_vector
+ * Pruning vector relative to the PERPT.erp_id.
+ * Used for reducing lookups.
+ * 0 - NEED: Do a lookup using the eRP.
+ * 1 - PRUNE: Do not perform a lookup using the eRP.
+ * May be modified by PEAPBL and PEAPBM.
+ * Note: In Spectrum-2, a region of 8 key blocks must be set to either
+ * all 1's or all 0's.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1);
+
+/* reg_ptce3_prune_ctcam
+ * Pruning on C-TCAM. Used for reducing lookups.
+ * 0 - NEED: Do a lookup in the C-TCAM.
+ * 1 - PRUNE: Do not perform a lookup in the C-TCAM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1);
+
+/* reg_ptce3_large_exists
+ * Large entry key ID exists.
+ * Within the region:
+ * 0 - SINGLE: The large_entry_key_id is not currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will be added.
+ * For rule delete: The MSB of the key will be removed.
+ * 1 - NON_SINGLE: The large_entry_key_id is currently in use.
+ * For rule insert: The MSB of the key (blocks 6..11) will not be added.
+ * For rule delete: The MSB of the key will not be removed.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1);
+
+/* reg_ptce3_large_entry_key_id
+ * Large entry key ID.
+ * A key for 12 key blocks rules. Reserved when region has less than 12 key
+ * blocks. Must be different for different keys which have the same common
+ * 6 key blocks (MSB, blocks 6..11) key within a region.
+ * Range is 0..cap_max_pe_large_key_id - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24);
+
+/* reg_ptce3_action_pointer
+ * Pointer to action.
+ * Range is 0..cap_max_kvd_action_sets - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24);
+
+static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
+ enum mlxsw_reg_ptce3_op op,
+ u32 priority,
+ const char *tcam_region_info,
+ const char *key, u8 erp_id,
+ bool large_exists, u32 lkey_id,
+ u32 action_pointer)
+{
+ MLXSW_REG_ZERO(ptce3, payload);
+ mlxsw_reg_ptce3_v_set(payload, valid);
+ mlxsw_reg_ptce3_op_set(payload, op);
+ mlxsw_reg_ptce3_priority_set(payload, priority);
+ mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
+ mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
+ mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
+ mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
+ mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
+}
+
/* PERCR - Policy-Engine Region Configuration Register
* ---------------------------------------------------
* This register configures the region parameters. The region_id must be
@@ -2598,7 +2868,6 @@ static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
mlxsw_reg_percr_bf_bypass_set(payload, true);
- memset(payload + 0x20, 0xff, 96);
}
/* PERERP - Policy-Engine Region eRP Register
@@ -2663,12 +2932,28 @@ MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1);
*/
MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4);
-static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id)
+static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload,
+ unsigned long *erp_vector,
+ unsigned long size)
+{
+ unsigned long bit;
+
+ for_each_set_bit(bit, erp_vector, size)
+ mlxsw_reg_pererp_erpt_vector_set(payload, bit, true);
+}
+
+static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
+ bool ctcam_le, bool erpt_pointer_valid,
+ u8 erpt_bank_pointer, u8 erpt_pointer,
+ u8 master_rp_id)
{
MLXSW_REG_ZERO(pererp, payload);
mlxsw_reg_pererp_region_id_set(payload, region_id);
- mlxsw_reg_pererp_ctcam_le_set(payload, true);
- mlxsw_reg_pererp_erpt_pointer_valid_set(payload, true);
+ mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le);
+ mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid);
+ mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer);
+ mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer);
+ mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
}
/* IEDR - Infrastructure Entry Delete Register
@@ -2732,6 +3017,44 @@ static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index,
mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start);
}
+/* QPTS - QoS Priority Trust State Register
+ * ----------------------------------------
+ * This register controls the port policy to calculate the switch priority and
+ * packet color based on incoming packet fields.
+ */
+#define MLXSW_REG_QPTS_ID 0x4002
+#define MLXSW_REG_QPTS_LEN 0x8
+
+MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
+
+/* reg_qpts_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qpts_trust_state {
+ MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP = 2, /* For MPLS, trust EXP. */
+};
+
+/* reg_qpts_trust_state
+ * Trust state for a given port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
+
+static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ MLXSW_REG_ZERO(qpts, payload);
+
+ mlxsw_reg_qpts_local_port_set(payload, local_port);
+ mlxsw_reg_qpts_trust_state_set(payload, ts);
+}
+
/* QPCR - QoS Policer Configuration Register
* -----------------------------------------
* The QPCR register is used to create policers - that limit
@@ -3044,6 +3367,219 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
+/* QRWE - QoS ReWrite Enable
+ * -------------------------
+ * This register configures the rewrite enable per receive port.
+ */
+#define MLXSW_REG_QRWE_ID 0x400F
+#define MLXSW_REG_QRWE_LEN 0x08
+
+MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
+
+/* reg_qrwe_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported. No support for router port.
+ */
+MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8);
+
+/* reg_qrwe_dscp
+ * Whether to enable DSCP rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
+
+/* reg_qrwe_pcp
+ * Whether to enable PCP and DEI rewrite (default is 0, don't rewrite).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
+
+static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port,
+ bool rewrite_pcp, bool rewrite_dscp)
+{
+ MLXSW_REG_ZERO(qrwe, payload);
+ mlxsw_reg_qrwe_local_port_set(payload, local_port);
+ mlxsw_reg_qrwe_pcp_set(payload, rewrite_pcp);
+ mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp);
+}
+
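+/* A usage sketch (illustrative only): enabling rewrite of both PCP/DEI
+ * and DSCP for a port amounts to:
+ *
+ *	char qrwe_pl[MLXSW_REG_QRWE_LEN];
+ *
+ *	mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port, true, true);
+ *	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);
+ */
+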
+/* QPDSM - QoS Priority to DSCP Mapping
+ * ------------------------------------
+ * QoS Priority to DSCP Mapping Register
+ */
+#define MLXSW_REG_QPDSM_ID 0x4011
+#define MLXSW_REG_QPDSM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN 0x4 /* record length */
+#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT 16
+#define MLXSW_REG_QPDSM_LEN (MLXSW_REG_QPDSM_BASE_LEN + \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
+
+/* reg_qpdsm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8);
+
+/* reg_qpdsm_prio_entry_color0_e
+ * Enable update of the entry for color 0 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 31, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color0_dscp
+ * DSCP field in the outer label of the packet for color 0 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 24, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_e
+ * Enable update of the entry for color 1 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 23, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color1_dscp
+ * DSCP field in the outer label of the packet for color 1 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 16, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_e
+ * Enable update of the entry for color 2 and a given port.
+ * Access: WO
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_e,
+ MLXSW_REG_QPDSM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdsm_prio_entry_color2_dscp
+ * DSCP field in the outer label of the packet for color 2 and a given port.
+ * Reserved when e=0.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
+ MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
+ MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdsm, payload);
+ mlxsw_reg_qpdsm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp)
+{
+ mlxsw_reg_qpdsm_prio_entry_color0_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color0_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color1_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color1_dscp_set(payload, prio, dscp);
+ mlxsw_reg_qpdsm_prio_entry_color2_e_set(payload, prio, 1);
+ mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp);
+}
+
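+/* A usage sketch (illustrative only; "map" is a hypothetical caller-owned
+ * priority-to-DSCP table): all 16 priority records can be packed into one
+ * payload and flushed with a single register write:
+ *
+ *	char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
+ *	unsigned short prio;
+ *
+ *	mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
+ *	for (prio = 0; prio < MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT; prio++)
+ *		mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, prio, map[prio]);
+ *	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);
+ */
+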
+/* QPDPM - QoS Port DSCP to Priority Mapping Register
+ * --------------------------------------------------
+ * This register controls the mapping from DSCP field to
+ * Switch Priority for IP packets.
+ */
+#define MLXSW_REG_QPDPM_ID 0x4013
+#define MLXSW_REG_QPDPM_BASE_LEN 0x4 /* base length, without records */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN 0x2 /* record length */
+#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_QPDPM_LEN (MLXSW_REG_QPDPM_BASE_LEN + \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN * \
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
+
+/* reg_qpdpm_local_port
+ * Local Port. Supported for data packets from CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8);
+
+/* reg_qpdpm_dscp_e
+ * Enable update of the specific entry. When cleared, the switch_prio and color
+ * fields are ignored and the previous switch_prio and color values are
+ * preserved.
+ * Access: WO
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_e, MLXSW_REG_QPDPM_BASE_LEN, 15, 1,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_qpdpm_dscp_prio
+ * The new Switch Priority value for the relevant DSCP value.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
+ MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
+ MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(qpdpm, payload);
+ mlxsw_reg_qpdpm_local_port_set(payload, local_port);
+}
+
+static inline void
+mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio)
+{
+ mlxsw_reg_qpdpm_dscp_entry_e_set(payload, dscp, 1);
+ mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio);
+}
+
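+/* A usage sketch (illustrative only; "dscp" and "prio" stand in for
+ * caller-supplied values): since entries with e=0 keep their previous
+ * mapping, a single DSCP value can be remapped without touching the
+ * other 63 records:
+ *
+ *	char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
+ *
+ *	mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
+ *	mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, dscp, prio);
+ *	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);
+ */
+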
+/* QTCTM - QoS Switch Traffic Class Table is Multicast-Aware Register
+ * ------------------------------------------------------------------
+ * This register configures whether the Switch Priority to Traffic Class
+ * mapping takes the Multicast packet indication into account. If so,
+ * multicast packets will get a Traffic Class that is (cap_max_tclass_data/2)
+ * above the value configured by QTCT.
+ * By default, Switch Priority to Traffic Class mapping is not based on
+ * Multicast packet indication.
+ */
+#define MLXSW_REG_QTCTM_ID 0x401A
+#define MLXSW_REG_QTCTM_LEN 0x08
+
+MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
+
+/* reg_qtctm_local_port
+ * Local port number.
+ * No support for CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
+
+/* reg_qtctm_mc
+ * Multicast Mode
+ * Whether Switch Priority to Traffic Class mapping is based on Multicast packet
+ * indication (default is 0, not based on Multicast packet indication).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
+
+static inline void
+mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
+{
+ MLXSW_REG_ZERO(qtctm, payload);
+ mlxsw_reg_qtctm_local_port_set(payload, local_port);
+ mlxsw_reg_qtctm_mc_set(payload, mc);
+}
+
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -8248,13 +8784,20 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(prcr),
MLXSW_REG(pefa),
MLXSW_REG(ptce2),
+ MLXSW_REG(perpt),
MLXSW_REG(perar),
+ MLXSW_REG(ptce3),
MLXSW_REG(percr),
MLXSW_REG(pererp),
MLXSW_REG(iedr),
+ MLXSW_REG(qpts),
MLXSW_REG(qpcr),
MLXSW_REG(qtct),
MLXSW_REG(qeec),
+ MLXSW_REG(qrwe),
+ MLXSW_REG(qpdsm),
+ MLXSW_REG(qpdpm),
+ MLXSW_REG(qtctm),
MLXSW_REG(pmlp),
MLXSW_REG(pmtu),
MLXSW_REG(ptys),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index f672a7b71de7..bf650f2cd5af 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -65,6 +65,13 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
+ MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
+ MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
+ MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
@@ -108,6 +115,13 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
+ [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
+ [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
+ [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 039228525fb1..028ecc9aa5f1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2793,9 +2793,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
false, 0);
if (err)
return err;
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ false, 0);
+ if (err)
+ return err;
}
- /* Make sure the max shaper is disabled in all hierarcies that
+ /* Make sure the max shaper is disabled in all hierarchies that
* support it.
*/
err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
@@ -2830,6 +2837,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
+static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtctm_pl[MLXSW_REG_QTCTM_LEN];
+
+ mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
@@ -2958,6 +2975,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_ets_init;
}
+ err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_tc_mc_mode;
+ }
+
/* ETS and buffers must be initialized before DCB. */
err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
if (err) {
@@ -3014,6 +3038,8 @@ err_port_qdiscs_init:
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
@@ -3048,6 +3074,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
+ mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
kfree(mlxsw_sp_port->sample);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 589c63daf085..13eca1a79d52 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
@@ -54,6 +54,7 @@
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
+#include "reg.h"
#define MLXSW_SP_FID_8021D_MAX 1024
@@ -243,6 +244,7 @@ struct mlxsw_sp_port {
struct ieee_ets *ets;
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
+ enum mlxsw_reg_qpts_trust_state trust_state;
} dcb;
struct {
u8 module;
@@ -628,6 +630,7 @@ struct mlxsw_sp_acl_tcam_ops {
void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
size_t region_priv_size;
int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
struct mlxsw_sp_acl_tcam_region *region);
void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
index d339ec43d79c..5c8956573632 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -58,6 +58,26 @@ struct mlxsw_sp1_acl_tcam_entry {
struct mlxsw_sp_acl_ctcam_entry centry;
};
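+/* Spectrum-1 does not use the A-TCAM, so its C-TCAM region ops have
+ * nothing to do on entry insertion and removal; they exist only to
+ * satisfy the common C-TCAM interface shared with Spectrum-2.
+ */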
+static int
+mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp1_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove,
+};
+
static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
struct mlxsw_sp_acl_tcam *tcam)
{
@@ -122,13 +142,15 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
struct mlxsw_sp_acl_tcam_region *_region)
{
struct mlxsw_sp1_acl_tcam_region *region = region_priv;
int err;
err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
- _region);
+ _region,
+ &mlxsw_sp1_acl_ctcam_region_ops);
if (err)
return err;
err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index d7f1fb35ea2a..22c876496379 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -39,23 +39,64 @@
#include "core_acl_flex_actions.h"
struct mlxsw_sp2_acl_tcam {
+ struct mlxsw_sp_acl_atcam atcam;
u32 kvdl_index;
unsigned int kvdl_count;
};
struct mlxsw_sp2_acl_tcam_region {
- struct mlxsw_sp_acl_ctcam_region cregion;
+ struct mlxsw_sp_acl_atcam_region aregion;
+ struct mlxsw_sp_acl_tcam_region *region;
};
struct mlxsw_sp2_acl_tcam_chunk {
- struct mlxsw_sp_acl_ctcam_chunk cchunk;
+ struct mlxsw_sp_acl_atcam_chunk achunk;
};
struct mlxsw_sp2_acl_tcam_entry {
- struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_atcam_entry aentry;
struct mlxsw_afa_block *act_block;
};
+static int
+mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ struct mlxsw_sp_acl_erp *erp;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+
+ return 0;
+}
+
+static void
+mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
+ aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
+
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+static const struct mlxsw_sp_acl_ctcam_region_ops
+mlxsw_sp2_acl_ctcam_region_ops = {
+ .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert,
+ .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove,
+};
+
static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
struct mlxsw_sp_acl_tcam *_tcam)
{
@@ -99,9 +140,14 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
if (err)
goto err_pgcr_write;
+ err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam);
+ if (err)
+ goto err_atcam_init;
+
mlxsw_afa_block_destroy(afa_block);
return 0;
+err_atcam_init:
err_pgcr_write:
err_pefa_write:
err_afa_block_continue:
@@ -116,22 +162,24 @@ static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct mlxsw_sp2_acl_tcam *tcam = priv;
+ mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
tcam->kvdl_count, tcam->kvdl_index);
}
static int
mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ void *tcam_priv,
struct mlxsw_sp_acl_tcam_region *_region)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
- int err;
+ struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;
- err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region);
- if (err)
- return err;
- return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
- _region);
+ region->region = _region;
+
+ return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
+ &region->aregion, _region,
+ &mlxsw_sp2_acl_ctcam_region_ops);
}
static void
@@ -139,7 +187,7 @@ mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
{
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
- mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+ mlxsw_sp_acl_atcam_region_fini(&region->aregion);
}
static int
@@ -155,7 +203,7 @@ static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
struct mlxsw_sp2_acl_tcam_region *region = region_priv;
struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
- mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+ mlxsw_sp_acl_atcam_chunk_init(&region->aregion, &chunk->achunk,
priority);
}
@@ -163,7 +211,7 @@ static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
{
struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
- mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+ mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
}
static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
@@ -176,9 +224,9 @@ static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
entry->act_block = rulei->act_block;
- return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
- &chunk->cchunk, &entry->centry,
- rulei, true);
+ return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, &region->aregion,
+ &chunk->achunk, &entry->aentry,
+ rulei);
}
static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -189,8 +237,8 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
- mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
- &chunk->cchunk, &entry->centry);
+ mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, &region->aregion, &chunk->achunk,
+ &entry->aentry);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index b1b3b0e0c00e..3a05e0b3f730 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -34,12 +34,275 @@
*/
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
#include "reg.h"
#include "core.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_keys.h"
+
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+
+struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_lkey_id {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key;
+ refcount_t refcnt;
+ u32 id;
+};
+
+struct mlxsw_sp_acl_atcam_region_ops {
+ int (*init)(struct mlxsw_sp_acl_atcam_region *aregion);
+ void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
+ struct mlxsw_sp_acl_atcam_lkey_id *
+ (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
+};
+
+struct mlxsw_sp_acl_atcam_region_generic {
+ struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id;
+};
+
+struct mlxsw_sp_acl_atcam_region_12kb {
+ struct rhashtable lkey_ht;
+ unsigned int max_lkey_id;
+ unsigned long *used_lkey_id;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node),
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node),
+};
+
+static bool
+mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL);
+ if (!region_generic)
+ return -ENOMEM;
+
+ refcount_set(&region_generic->dummy_lkey_id.refcnt, 1);
+ aregion->priv = region_generic;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ kfree(aregion->priv);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_generic *region_generic;
+
+ region_generic = aregion->priv;
+ return &region_generic->dummy_lkey_id;
+}
+
+static void
+mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_generic_ops = {
+ .init = mlxsw_sp_acl_atcam_region_generic_init,
+ .fini = mlxsw_sp_acl_atcam_region_generic_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put,
+};
+
+static int
+mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
+ size_t alloc_size;
+ u64 max_lkey_id;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID))
+ return -EIO;
+
+ max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID);
+ region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL);
+ if (!region_12kb)
+ return -ENOMEM;
+
+ alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
+ region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
+ if (!region_12kb->used_lkey_id) {
+ err = -ENOMEM;
+ goto err_used_lkey_id_alloc;
+ }
+
+ err = rhashtable_init(&region_12kb->lkey_ht,
+ &mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ region_12kb->max_lkey_id = max_lkey_id;
+ aregion->priv = region_12kb;
+
+ return 0;
+
+err_rhashtable_init:
+ kfree(region_12kb->used_lkey_id);
+err_used_lkey_id_alloc:
+ kfree(region_12kb);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+
+ rhashtable_destroy(&region_12kb->lkey_ht);
+ kfree(region_12kb->used_lkey_id);
+ kfree(region_12kb);
+}
+
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ u32 id;
+ int err;
+
+ id = find_first_zero_bit(region_12kb->used_lkey_id,
+ region_12kb->max_lkey_id);
+ if (id < region_12kb->max_lkey_id)
+ __set_bit(id, region_12kb->used_lkey_id);
+ else
+ return ERR_PTR(-ENOBUFS);
+
+ lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL);
+ if (!lkey_id) {
+ err = -ENOMEM;
+ goto err_lkey_id_alloc;
+ }
+
+ lkey_id->id = id;
+ memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key));
+ refcount_set(&lkey_id->refcnt, 1);
+
+ err = rhashtable_insert_fast(&region_12kb->lkey_ht,
+ &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return lkey_id;
+
+err_rhashtable_insert:
+ kfree(lkey_id);
+err_lkey_id_alloc:
+ __clear_bit(id, region_12kb->used_lkey_id);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ u32 id = lkey_id->id;
+
+ rhashtable_remove_fast(&region_12kb->lkey_ht, &lkey_id->ht_node,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ kfree(lkey_id);
+ __clear_bit(id, region_12kb->used_lkey_id);
+}
+
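+/* Large key IDs are reference counted and shared between entries with
+ * the same encoded MSB key blocks and eRP ID: IDs are allocated from a
+ * per-region bitmap and looked up via the region's rhashtable.
+ */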
+static struct mlxsw_sp_acl_atcam_lkey_id *
+mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u8 erp_id)
+{
+ struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+	struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 }};
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
+ NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ ht_key.erp_id = erp_id;
+ lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
+ mlxsw_sp_acl_atcam_lkey_id_ht_params);
+ if (lkey_id) {
+ refcount_inc(&lkey_id->refcnt);
+ return lkey_id;
+ }
+
+ return mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key);
+}
+
+static void
+mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id)
+{
+ if (refcount_dec_and_test(&lkey_id->refcnt))
+ mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id);
+}
+
+static const struct mlxsw_sp_acl_atcam_region_ops
+mlxsw_sp_acl_atcam_region_12kb_ops = {
+ .init = mlxsw_sp_acl_atcam_region_12kb_init,
+ .fini = mlxsw_sp_acl_atcam_region_12kb_fini,
+ .lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get,
+ .lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put,
+};
+
+static const struct mlxsw_sp_acl_atcam_region_ops *
+mlxsw_sp_acl_atcam_region_ops_arr[] = {
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] =
+ &mlxsw_sp_acl_atcam_region_generic_ops,
+ [MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] =
+ &mlxsw_sp_acl_atcam_region_12kb_ops,
+};
int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
u16 region_id)
@@ -57,39 +320,249 @@ int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
}
-static int mlxsw_sp_acl_atcam_region_param_init(struct mlxsw_sp *mlxsw_sp,
- u16 region_id)
+static void
+mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
- char percr_pl[MLXSW_REG_PERCR_LEN];
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ enum mlxsw_sp_acl_atcam_region_type region_type;
+ unsigned int blocks_count;
- mlxsw_reg_percr_pack(percr_pl, region_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+	/* We already know the blocks count cannot exceed the maximum
+	 * blocks count.
+	 */
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ if (blocks_count <= 2)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB;
+ else if (blocks_count <= 4)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB;
+ else if (blocks_count <= 8)
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB;
+ else
+ region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB;
+
+ aregion->type = region_type;
+ aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type];
}
-static int
-mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp,
- u16 region_id)
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
+{
+ int err;
+
+ aregion->region = region;
+ aregion->atcam = atcam;
+ mlxsw_sp_acl_atcam_region_type_init(aregion);
+
+ err = rhashtable_init(&aregion->entries_ht,
+ &mlxsw_sp_acl_atcam_entries_ht_params);
+ if (err)
+ return err;
+ err = aregion->ops->init(aregion);
+ if (err)
+ goto err_ops_init;
+ err = mlxsw_sp_acl_erp_region_init(aregion);
+ if (err)
+ goto err_erp_region_init;
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion,
+ region, ops);
+ if (err)
+ goto err_ctcam_region_init;
+
+ return 0;
+
+err_ctcam_region_init:
+ mlxsw_sp_acl_erp_region_fini(aregion);
+err_erp_region_init:
+ aregion->ops->fini(aregion);
+err_ops_init:
+ rhashtable_destroy(&aregion->entries_ht);
+ return err;
+}
+
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
{
- char pererp_pl[MLXSW_REG_PERERP_LEN];
+ mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion);
+ mlxsw_sp_acl_erp_region_fini(aregion);
+ aregion->ops->fini(aregion);
+ rhashtable_destroy(&aregion->entries_ht);
+}
- mlxsw_reg_pererp_pack(pererp_pl, region_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority)
+{
+ mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk,
+ priority);
}
-int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk)
{
+ mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
int err;
- err = mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
if (err)
return err;
- err = mlxsw_sp_acl_atcam_region_param_init(mlxsw_sp, region->id);
+
+ lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ if (IS_ERR(lkey_id))
+ return PTR_ERR(lkey_id);
+ aentry->lkey_id = lkey_id;
+
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
+ priority, region->tcam_region_info,
+ aentry->ht_key.enc_key, erp_id,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
if (err)
- return err;
- err = mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id);
+ goto err_ptce3_write;
+
+ return 0;
+
+err_ptce3_write:
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+
+ mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
+ region->tcam_region_info, aentry->ht_key.enc_key,
+ erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ lkey_id->id, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+ aregion->ops->lkey_id_put(aregion, lkey_id);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ struct mlxsw_sp_acl_erp *erp;
+ unsigned int blocks_count;
+ int err;
+
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values,
+ aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
+
+ erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
+ if (IS_ERR(erp))
+ return PTR_ERR(erp);
+ aentry->erp = erp;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+
+ /* We can't insert identical rules into the A-TCAM, so fail and
+ * let the rule spill into C-TCAM
+ */
+ err = rhashtable_lookup_insert_fast(&aregion->entries_ht,
+ &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
if (err)
- return err;
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
+ rulei);
+ if (err)
+ goto err_rule_insert;
return 0;
+
+err_rule_insert:
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_put(aregion, erp);
+ return err;
+}
+
+static void
+__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+ rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
+ mlxsw_sp_acl_atcam_entries_ht_params);
+ mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+}
+
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei);
+ if (!err)
+ return 0;
+
+	/* It is possible we failed to add the rule to the A-TCAM because
+	 * the maximum number of masks was exceeded. Try to spill into
+	 * C-TCAM.
+	 */
+	return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion,
+					    &achunk->cchunk, &aentry->centry,
+					    rulei, true);
+}
+
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion,
+ &achunk->cchunk, &aentry->centry);
+ else
+ __mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
+}
+
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam);
+}
+
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index ef0d4c0a5a1f..7440a1189250 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -69,13 +69,15 @@ mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
struct mlxsw_sp_acl_rule_info *rulei,
bool fillup_priority)
{
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ unsigned int blocks_count;
char *act_set;
u32 priority;
char *mask;
@@ -88,10 +90,17 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
return err;
mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset, priority);
+ region->tcam_region_info,
+ centry->parman_item.index, priority);
key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
+ blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
+ blocks_count - 1);
+
+ err = cregion->ops->entry_insert(cregion, centry, mask);
+ if (err)
+ return err;
/* Only the first action set belongs here, the rest is in KVD */
act_set = mlxsw_afa_block_first_set(rulei->act_block);
@@ -102,14 +111,16 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset)
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
{
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset, 0);
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ cregion->ops->entry_remove(cregion, centry);
}
static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
@@ -147,11 +158,14 @@ static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
.algo = PARMAN_ALGO_TYPE_LSORT,
};
-int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ctcam_region *cregion,
- struct mlxsw_sp_acl_tcam_region *region)
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops)
{
cregion->region = region;
+ cregion->ops = ops;
cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
cregion);
if (!cregion->parman)
@@ -190,8 +204,7 @@ int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region,
- centry->parman_item.index,
+ err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry,
rulei, fillup_priority);
if (err)
goto err_rule_insert;
@@ -208,8 +221,7 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry)
{
- mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region,
- centry->parman_item.index);
+ mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry);
parman_item_remove(cregion->parman, &cchunk->parman_prio,
&centry->parman_item);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
new file mode 100644
index 000000000000..463590bbb348
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -0,0 +1,1199 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */
+#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100
+#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16
+
+struct mlxsw_sp_acl_erp_core {
+ unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
+ struct gen_pool *erp_tables;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int num_erp_banks;
+};
+
+struct mlxsw_sp_acl_erp_key {
+ char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+ bool ctcam;
+};
+
+struct mlxsw_sp_acl_erp {
+ struct mlxsw_sp_acl_erp_key key;
+ u8 id;
+ u8 index;
+ refcount_t refcnt;
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+};
+
+struct mlxsw_sp_acl_erp_master_mask {
+ DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
+ unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN];
+};
+
+struct mlxsw_sp_acl_erp_table {
+ struct mlxsw_sp_acl_erp_master_mask master_mask;
+ DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ struct list_head atcam_erps_list;
+ struct rhashtable erp_ht;
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ struct mlxsw_sp_acl_atcam_region *aregion;
+ const struct mlxsw_sp_acl_erp_table_ops *ops;
+ unsigned long base_index;
+ unsigned int num_atcam_erps;
+ unsigned int num_max_atcam_erps;
+ unsigned int num_ctcam_erps;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
+ .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
+ .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+};
+
+struct mlxsw_sp_acl_erp_table_ops {
+ struct mlxsw_sp_acl_erp *
+ (*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+ void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+};
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key);
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp);
+
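+/* Each ops struct below corresponds to the number of masks currently in
+ * use by the region. A single mask is programmed as the master RP with
+ * no eRP table; creating the second mask transitions the region to an
+ * actual eRP table, and destroying the second-to-last mask transitions
+ * it back.
+ */
+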
+static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = {
+ .erp_create = mlxsw_sp_acl_erp_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_second_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy,
+};
+
+static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
+ .erp_create = mlxsw_sp_acl_erp_first_mask_create,
+ .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
+};
+
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->key.ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
+{
+ return erp->id;
+}
+
+static unsigned int
+mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+
+ return erp_core->erpt_entries_size[aregion->type];
+}
+
+static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_id)
+{
+ u8 id;
+
+ id = find_first_zero_bit(erp_table->erp_id_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) {
+ __set_bit(id, erp_table->erp_id_bitmap);
+ *p_id = id;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 id)
+{
+ __clear_bit(id, erp_table->erp_id_bitmap);
+}
+
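+/* The master mask is the union of the masks of all eRPs in the region.
+ * Its bits are reference counted: a bit is set when its count goes from
+ * zero to one and cleared when it drops back to zero, so removing one
+ * eRP never clears bits still required by others.
+ */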
+static void
+mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (mask->count[bit]++ == 0)
+ __set_bit(bit, mask->bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit,
+ struct mlxsw_sp_acl_erp_master_mask *mask)
+{
+ if (--mask->count[bit] == 0)
+ __clear_bit(bit, mask->bitmap);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+ char *master_mask;
+
+ mlxsw_reg_percr_pack(percr_pl, region->id);
+ master_mask = mlxsw_reg_percr_master_mask_data(percr_pl);
+ bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp)
+{
+ unsigned long bit;
+ int err;
+
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
+ &erp_table->master_mask);
+
+ err = mlxsw_sp_acl_erp_master_mask_update(erp_table);
+ if (err)
+ goto err_master_mask_update;
+
+ return 0;
+
+err_master_mask_update:
+ for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ mlxsw_sp_acl_erp_master_mask_bit_set(bit,
+ &erp_table->master_mask);
+ return err;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id);
+ if (err)
+ goto err_erp_id_get;
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ list_add(&erp->list, &erp_table->atcam_erps_list);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_atcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return erp;
+
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+err_erp_id_get:
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_atcam_erps--;
+ list_del(&erp->list);
+ mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
+ kfree(erp);
+}
+
+static int
+mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long *p_index)
+{
+ unsigned int num_rows, entry_size;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+ return -EINVAL;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
+ *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+ if (*p_index == 0)
+ return -ENOBUFS;
+ *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned int num_erps,
+ enum mlxsw_sp_acl_atcam_region_type region_type,
+ unsigned long index)
+{
+ unsigned long base_index;
+ unsigned int entry_size;
+ size_t size;
+
+ entry_size = erp_core->erpt_entries_size[region_type];
+ base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+ size = num_erps / erp_core->num_erp_banks * entry_size;
+ gen_pool_free(erp_core->erp_tables, base_index, size);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ if (!list_is_singular(&erp_table->atcam_erps_list))
+ return NULL;
+
+ return list_first_entry(&erp_table->atcam_erps_list,
+ struct mlxsw_sp_acl_erp, list);
+}
+
+static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 *p_index)
+{
+ u8 index;
+
+ index = find_first_zero_bit(erp_table->erp_index_bitmap,
+ erp_table->num_max_atcam_erps);
+ if (index < erp_table->num_max_atcam_erps) {
+ __set_bit(index, erp_table->erp_index_bitmap);
+ *p_index = index;
+ return 0;
+ }
+
+ return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table,
+ u8 index)
+{
+ __clear_bit(index, erp_table->erp_index_bitmap);
+}
+
+static void
+mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table,
+ const struct mlxsw_sp_acl_erp *erp,
+ u8 *p_erpt_bank, u8 *p_erpt_index)
+{
+ unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table);
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned int row;
+
+ *p_erpt_bank = erp->index % erp_core->num_erp_banks;
+ row = erp->index / erp_core->num_erp_banks;
+ *p_erpt_index = erp_table->base_index + row * entry_size;
+}
+
+static int
+mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index,
+ erp->key.mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ enum mlxsw_reg_perpt_key_size key_size;
+ char perpt_pl[MLXSW_REG_PERPT_LEN];
+ u8 erpt_bank, erpt_index;
+
+ mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index);
+ key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type;
+ mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id,
+ 0, erp_table->base_index, erp->index, empty_mask);
+ mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table,
+ bool ctcam_le)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void
+mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+	/* It is possible we do not have a master RP when we disable the
+	 * table: for example, when there are no rules in the A-TCAM and
+	 * the last C-TCAM rule is deleted.
+	 */
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0,
+ master_rp ? master_rp->id : 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ list_for_each_entry(erp, &erp_table->atcam_erps_list, list) {
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+ }
+
+ return 0;
+
+err_table_erp_add:
+ list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list,
+ list)
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ return err;
+}
+
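+/* Expand the eRP table by one row (one eRP per bank): allocate a larger
+ * table at a new base index, rewrite the existing eRPs there, re-enable
+ * the region with the new base and only then free the old table.
+ */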
+static int
+mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps;
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ unsigned long old_base_index = erp_table->base_index;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ int err;
+
+ if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps)
+ return 0;
+
+ if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION)
+ return -ENOBUFS;
+
+ num_erps = old_num_erps + erp_core->num_erp_banks;
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = num_erps;
+
+ err = mlxsw_sp_acl_erp_table_relocate(erp_table);
+ if (err)
+ goto err_table_relocate;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le);
+ if (err)
+ goto err_table_enable;
+
+ mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps,
+ erp_table->aregion->type, old_base_index);
+
+ return 0;
+
+err_table_enable:
+err_table_relocate:
+ erp_table->num_max_atcam_erps = old_num_erps;
+ mlxsw_sp_acl_erp_table_free(erp_core, num_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->base_index = old_base_index;
+ return err;
+}
+
+static int
+mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+ int err;
+
+ /* Initially, allocate a single eRP row. Expand later as needed */
+ err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks,
+ erp_table->aregion->type,
+ &erp_table->base_index);
+ if (err)
+ return err;
+ erp_table->num_max_atcam_erps = erp_core->num_erp_banks;
+
+ /* Transition the sole RP currently configured (the master RP)
+ * to the eRP table
+ */
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp) {
+ err = -EINVAL;
+ goto err_table_master_rp;
+ }
+
+ /* Maintain the same eRP bank for the master RP, so that the
+ * bloom filter does not need to be updated
+ */
+ master_rp->index = master_rp->index % erp_core->num_erp_banks;
+ __set_bit(master_rp->index, erp_table->erp_index_bitmap);
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
+ if (err)
+ goto err_table_master_rp_add;
+
+ err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
+ if (err)
+ goto err_table_enable;
+
+ return 0;
+
+err_table_enable:
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+err_table_master_rp_add:
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+err_table_master_rp:
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
+ struct mlxsw_sp_acl_erp *master_rp;
+
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
+ if (!master_rp)
+ return;
+ mlxsw_sp_acl_erp_table_erp_del(master_rp);
+ __clear_bit(master_rp->index, erp_table->erp_index_bitmap);
+ mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+}
+
+static int
+mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+ struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region;
+ struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp;
+ bool ctcam_le = erp_table->num_ctcam_erps > 0;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0,
+ erp_table->base_index, 0);
+ mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap,
+ MLXSW_SP_ACL_ERP_MAX_PER_REGION);
+ mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* No need to re-enable lookup in the C-TCAM */
+ if (erp_table->num_ctcam_erps > 1)
+ return 0;
+
+ return mlxsw_sp_acl_erp_table_enable(erp_table, true);
+}
+
+static void
+mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ /* Only disable C-TCAM lookup when last C-TCAM eRP is deleted */
+ if (erp_table->num_ctcam_erps > 1)
+ return;
+
+ mlxsw_sp_acl_erp_table_enable(erp_table, false);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ switch (erp_table->num_atcam_erps) {
+ case 2:
+ /* Keep using the eRP table, but correctly set the
+ * operations pointer so that when an A-TCAM eRP is
+ * deleted we will transition to use the master mask
+ */
+ erp_table->ops = &erp_two_masks_ops;
+ break;
+ case 1:
+ /* We only kept the eRP table because we had C-TCAM
+ * eRPs in use. Now that the last C-TCAM eRP is gone we
+ * can stop using the table and transition to use the
+ * master mask
+ */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ erp_table->ops = &erp_single_mask_ops;
+ break;
+ case 0:
+ /* There are no more eRPs of any kind used by the region
+ * so free its eRP table and transition to initial state
+ */
+ mlxsw_sp_acl_erp_table_disable(erp_table);
+ mlxsw_sp_acl_erp_table_free(erp_table->erp_core,
+ erp_table->num_max_atcam_erps,
+ erp_table->aregion->type,
+ erp_table->base_index);
+ erp_table->ops = &erp_no_mask_ops;
+ break;
+ default:
+ break;
+ }
+}
+
+static struct mlxsw_sp_acl_erp *
+__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ erp = kzalloc(sizeof(*erp), GFP_KERNEL);
+ if (!erp)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&erp->key, key, sizeof(*key));
+ bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ refcount_set(&erp->refcnt, 1);
+ erp_table->num_ctcam_erps++;
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ if (err)
+ goto err_master_mask_set;
+
+ err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
+ if (err)
+ goto err_erp_region_ctcam_enable;
+
+ /* When C-TCAM is used, the eRP table must be used */
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_erp_region_ctcam_enable:
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+err_rhashtable_insert:
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+err_master_mask_set:
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ /* There is a special situation where we need to spill rules
+ * into the C-TCAM, yet the region is still using a master
+ * mask and thus not performing a lookup in the C-TCAM. This
+ * can happen when two rules that differ only in priority - and
+ * thus share the same key - are programmed. In this case
+ * we transition the region to use an eRP table
+ */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ return erp;
+
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
+
+ mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
+ rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
+ mlxsw_sp_acl_erp_ht_params);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ erp_table->num_ctcam_erps--;
+ kfree(erp);
+
+ /* Once the last C-TCAM eRP was destroyed, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use
+ */
+ if (erp_table->num_ctcam_erps > 0)
+ return;
+ mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
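+ /* Masks used by C-TCAM rules do not consume A-TCAM eRP entries */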
+ if (key->ctcam)
+ return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Expand the eRP table for the new eRP, if needed */
+ err = mlxsw_sp_acl_erp_table_expand(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ erp_table->ops = &erp_two_masks_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+ int err;
+
+ if (key->ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+
+ /* Transition to use eRP table instead of master mask */
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return ERR_PTR(err);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp)) {
+ err = PTR_ERR(erp);
+ goto err_erp_create;
+ }
+
+ err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index);
+ if (err)
+ goto err_erp_index_get;
+
+ err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp);
+ if (err)
+ goto err_table_erp_add;
+
+ err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp);
+ if (err)
+ goto err_region_erp_add;
+
+ erp_table->ops = &erp_two_masks_ops;
+
+ return erp;
+
+err_region_erp_add:
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+err_table_erp_add:
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+err_erp_index_get:
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+err_erp_create:
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ if (erp->key.ctcam)
+ return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp);
+
+ mlxsw_sp_acl_erp_region_erp_del(erp);
+ mlxsw_sp_acl_erp_table_erp_del(erp);
+ mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ /* Transition to use master mask instead of eRP table */
+ mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
+
+ erp_table->ops = &erp_single_mask_ops;
+}
+
+static struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
+{
+ struct mlxsw_sp_acl_erp *erp;
+
+ if (key->ctcam)
+ return ERR_PTR(-EINVAL);
+
+ erp = mlxsw_sp_acl_erp_generic_create(erp_table, key);
+ if (IS_ERR(erp))
+ return erp;
+
+ erp_table->ops = &erp_single_mask_ops;
+
+ return erp;
+}
+
+static void
+mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ mlxsw_sp_acl_erp_generic_destroy(erp);
+ erp_table->ops = &erp_no_mask_ops;
+}
+
+static void
+mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ WARN_ON(1);
+}
+
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key key;
+ struct mlxsw_sp_acl_erp *erp;
+
+ /* eRPs are allocated from a shared resource, but currently all
+ * allocations are done under RTNL.
+ */
+ ASSERT_RTNL();
+
+ memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
+ key.ctcam = ctcam;
+ erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
+ mlxsw_sp_acl_erp_ht_params);
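+ /* Reuse an existing eRP if one already covers this mask */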
+ if (erp) {
+ refcount_inc(&erp->refcnt);
+ return erp;
+ }
+
+ return erp_table->ops->erp_create(erp_table, &key);
+}
+
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ ASSERT_RTNL();
+
+ if (!refcount_dec_and_test(&erp->refcnt))
+ return;
+
+ erp_table->ops->erp_destroy(erp_table, erp);
+}
+
+static struct mlxsw_sp_acl_erp_table *
+mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL);
+ if (!erp_table)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ erp_table->erp_core = aregion->atcam->erp_core;
+ erp_table->ops = &erp_no_mask_ops;
+ INIT_LIST_HEAD(&erp_table->atcam_erps_list);
+ erp_table->aregion = aregion;
+
+ return erp_table;
+
+err_rhashtable_init:
+ kfree(erp_table);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ WARN_ON(!list_empty(&erp_table->atcam_erps_list));
+ rhashtable_destroy(&erp_table->erp_ht);
+ kfree(erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+
+ mlxsw_reg_percr_pack(percr_pl, aregion->region->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0,
+ 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ int err;
+
+ erp_table = mlxsw_sp_acl_erp_table_create(aregion);
+ if (IS_ERR(erp_table))
+ return PTR_ERR(erp_table);
+ aregion->erp_table = erp_table;
+
+ /* Initialize the region's master mask to all zeroes */
+ err = mlxsw_sp_acl_erp_master_mask_init(aregion);
+ if (err)
+ goto err_erp_master_mask_init;
+
+ /* Initialize the region to not use the eRP table */
+ err = mlxsw_sp_acl_erp_region_param_init(aregion);
+ if (err)
+ goto err_erp_region_param_init;
+
+ return 0;
+
+err_erp_region_param_init:
+err_erp_master_mask_init:
+ mlxsw_sp_acl_erp_table_destroy(erp_table);
+ return err;
+}
+
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
+{
+ mlxsw_sp_acl_erp_table_destroy(aregion->erp_table);
+}
+
+static int
+mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB))
+ return -EIO;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size;
+
+ size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB);
+ erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ unsigned int erpt_bank_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS))
+ return -EIO;
+ erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANK_SIZE);
+ erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_ERPT_BANKS);
+
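+ /* eRP rows for all regions are carved out of a single pool the
+ * size of one eRP bank
+ */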
+ erp_core->erp_tables = gen_pool_create(0, -1);
+ if (!erp_core->erp_tables)
+ return -ENOMEM;
+ gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL);
+
+ err = gen_pool_add(erp_core->erp_tables,
+ MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size,
+ -1);
+ if (err)
+ goto err_gen_pool_add;
+
+ /* Different regions require masks of different sizes */
+ err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_sizes_query;
+
+ return 0;
+
+err_erp_tables_sizes_query:
+err_gen_pool_add:
+ gen_pool_destroy(erp_core->erp_tables);
+ return err;
+}
+
+static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_erp_core *erp_core)
+{
+ gen_pool_destroy(erp_core->erp_tables);
+}
+
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ struct mlxsw_sp_acl_erp_core *erp_core;
+ int err;
+
+ erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL);
+ if (!erp_core)
+ return -ENOMEM;
+ erp_core->mlxsw_sp = mlxsw_sp;
+ atcam->erp_core = erp_core;
+
+ err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core);
+ if (err)
+ goto err_erp_tables_init;
+
+ return 0;
+
+err_erp_tables_init:
+ kfree(erp_core);
+ return err;
+}
+
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam)
+{
+ mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core);
+ kfree(atcam->erp_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 310fd87895b8..245e2f473c6f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -577,7 +577,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
- err = ops->region_init(mlxsw_sp, region->priv, region);
+ err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
if (err)
goto err_tcam_region_init;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 6403ec4f5267..881ade760ace 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -92,6 +92,9 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+#define MLXSW_SP_ACL_TCAM_MASK_LEN \
+ (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE)
+
struct mlxsw_sp_acl_tcam_group;
struct mlxsw_sp_acl_tcam_region {
@@ -109,6 +112,7 @@ struct mlxsw_sp_acl_tcam_region {
struct mlxsw_sp_acl_ctcam_region {
struct parman *parman;
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops;
struct mlxsw_sp_acl_tcam_region *region;
};
@@ -120,9 +124,19 @@ struct mlxsw_sp_acl_ctcam_entry {
struct parman_item parman_item;
};
-int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ctcam_region *cregion,
- struct mlxsw_sp_acl_tcam_region *region);
+struct mlxsw_sp_acl_ctcam_region_ops {
+ int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ const char *mask);
+ void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+};
+
+int
+mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
@@ -144,9 +158,102 @@ mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
return centry->parman_item.index;
}
+enum mlxsw_sp_acl_atcam_region_type {
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB,
+ MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB,
+ __MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX,
+};
+
+#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \
+ (__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 1)
+
+struct mlxsw_sp_acl_atcam {
+ struct mlxsw_sp_acl_erp_core *erp_core;
+};
+
+struct mlxsw_sp_acl_atcam_region {
+ struct rhashtable entries_ht; /* A-TCAM only */
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ const struct mlxsw_sp_acl_atcam_region_ops *ops;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct mlxsw_sp_acl_atcam *atcam;
+ enum mlxsw_sp_acl_atcam_region_type type;
+ struct mlxsw_sp_acl_erp_table *erp_table;
+ void *priv;
+};
+
+struct mlxsw_sp_acl_atcam_entry_ht_key {
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ u8 erp_id;
+};
+
+struct mlxsw_sp_acl_atcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp_acl_atcam_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
+ struct mlxsw_sp_acl_erp *erp;
+};
+
+static inline struct mlxsw_sp_acl_atcam_region *
+mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion);
+}
+
+static inline struct mlxsw_sp_acl_atcam_entry *
+mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry);
+}
+
int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
u16 region_id);
-int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region);
+int
+mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_tcam_region *region,
+ const struct mlxsw_sp_acl_ctcam_region_ops *ops);
+void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ unsigned int priority);
+void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk);
+int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+
+struct mlxsw_sp_acl_erp;
+
+bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
+u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
+struct mlxsw_sp_acl_erp *
+mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp *erp);
+int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
+void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
+int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
+void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam *atcam);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index b6ed7f7c531e..c31aeb25ab5a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -255,6 +255,270 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
return 0;
}
+static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
+ struct dcb_app *app)
+{
+ int prio;
+
+ if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "APP entry with priority value %u is invalid\n",
+ app->priority);
+ return -EINVAL;
+ }
+
+ switch (app->selector) {
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol >= 64) {
+ netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ /* Warn about any other DSCP APP entries with the same protocol ID. */
+ prio = fls(dcb_ieee_getapp_mask(dev, app));
+ if (prio--) {
+ if (prio < app->priority)
+ netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n",
+ app->priority, app->protocol, prio);
+ else if (prio > app->priority)
+ netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n",
+ app->priority, app->protocol, prio);
+ }
+ break;
+
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ if (app->protocol) {
+ netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ netdev_err(dev, "APP entries with selector %u not supported\n",
+ app->selector);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8
+mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ u8 prio_mask;
+
+ prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev);
+ if (prio_mask)
+ /* Take the highest configured priority. */
+ return fls(prio_mask) - 1;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 default_prio,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ int i;
+
+ dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map);
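+ /* Map each DSCP value to its highest configured priority, or
+ * to the default priority when none is configured
+ */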
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i])
+ map->map[i] = fls(map->map[i]) - 1;
+ else
+ map->map[i] = default_prio;
+ }
+}
+
+static bool
+mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ bool have_dscp = false;
+ int i;
+
+ dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map);
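+ /* Rewrite each priority to the highest DSCP value mapped to it */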
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
+ if (map->map[i]) {
+ map->map[i] = fls64(map->map[i]) - 1;
+ have_dscp = true;
+ }
+ }
+
+ return have_dscp;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpts_pl[MLXSW_REG_QPTS_LEN];
+
+ mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool rewrite_dscp)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qrwe_pl[MLXSW_REG_QRWE_LEN];
+
+ mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port,
+ false, rewrite_dscp);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qpts_trust_state ts)
+{
+ bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP;
+ int err;
+
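+ /* DSCP rewrite on egress is only enabled while trusting DSCP */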
+ if (mlxsw_sp_port->dcb.trust_state == ts)
+ return 0;
+
+ err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp);
+ if (err)
+ goto err_update_qrwe;
+
+ mlxsw_sp_port->dcb.trust_state = ts;
+ return 0;
+
+err_update_qrwe:
+ mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port,
+ mlxsw_sp_port->dcb.trust_state);
+ return err;
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_dscp_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);
+}
+
+static int
+mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct dcb_ieee_app_prio_map *map)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
+ short int i;
+
+ mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < ARRAY_SIZE(map->map); ++i)
+ mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);
+}
+
+static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct dcb_ieee_app_prio_map prio_map;
+ struct dcb_ieee_app_dscp_map dscp_map;
+ u8 default_prio;
+ bool have_dscp;
+ int err;
+
+ default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
+ have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
+ &prio_map);
+
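+ /* With no DSCP APP entries, fall back to trusting PCP */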
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
+ mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
+ &dscp_map);
+ err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
+ &dscp_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port,
+ &prio_map);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP);
+ if (err) {
+ /* A failure to set trust DSCP means that the QPDPM and QPDSM
+ * maps installed above are not in effect. And since we are here
+ * attempting to set trust DSCP, we couldn't have attempted to
+ * switch trust to PCP. Thus no cleanup is necessary.
+ */
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_dcbnl_app_validate(dev, app);
+ if (err)
+ return err;
+
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ goto err_update;
+
+ return 0;
+
+err_update:
+ dcb_ieee_delapp(dev, app);
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to update DCB APP configuration\n");
+ return 0;
+}
+
static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
@@ -394,6 +658,8 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
.ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlxsw_sp_dcbnl_ieee_setapp,
+ .ieee_delapp = mlxsw_sp_dcbnl_ieee_delapp,
.getdcbx = mlxsw_sp_dcbnl_getdcbx,
.setdcbx = mlxsw_sp_dcbnl_setdcbx,
@@ -467,6 +733,7 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
if (err)
goto err_port_pfc_init;
+ mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP;
mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 8d67f0123699..eec7166fad62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2436,17 +2436,48 @@ static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
kfree(net_work);
}
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ __mlxsw_sp_router_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_router_schedule_work(struct net *net,
+ struct notifier_block *nb,
+ void (*cb)(struct work_struct *))
+{
+ struct mlxsw_sp_netevent_work *net_work;
+ struct mlxsw_sp_router *router;
+
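+ /* Only events in the init namespace are handled */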
+ if (!net_eq(net, &init_net))
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ INIT_WORK(&net_work->work, cb);
+ net_work->mlxsw_sp = router->mlxsw_sp;
+ mlxsw_core_schedule_work(&net_work->work);
+ return NOTIFY_DONE;
+}
+
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp_router *router;
struct mlxsw_sp *mlxsw_sp;
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
- struct net *net;
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -2500,20 +2531,12 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
break;
case NETEVENT_IPV4_MPATH_HASH_UPDATE:
case NETEVENT_IPV6_MPATH_HASH_UPDATE:
- net = ptr;
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_mp_hash_event_work);
- if (!net_eq(net, &init_net))
- return NOTIFY_DONE;
-
- net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
- if (!net_work)
- return NOTIFY_BAD;
-
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
- INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
- net_work->mlxsw_sp = router->mlxsw_sp;
- mlxsw_core_schedule_work(&net_work->work);
- break;
+ case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
+ return mlxsw_sp_router_schedule_work(ptr, nb,
+ mlxsw_sp_router_update_priority_work);
}
return NOTIFY_DONE;
@@ -7382,6 +7405,7 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
@@ -7392,7 +7416,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
- mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index cd41911013e9..bb323f269839 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2941,7 +2941,7 @@ static int lan743x_pm_resume(struct device *dev)
return 0;
}
-const struct dev_pm_ops lan743x_pm_ops = {
+static const struct dev_pm_ops lan743x_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
#endif /*CONFIG_PM */
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 71899009c468..c26e0f70c494 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -2,8 +2,8 @@
# Exar device configuration
#
-config NET_VENDOR_EXAR
- bool "Exar devices"
+config NET_VENDOR_NETERION
+ bool "Neterion (Exar) devices"
default y
depends on PCI
---help---
@@ -11,16 +11,19 @@ config NET_VENDOR_EXAR
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Exar cards. If you say Y, you will be asked for
- your specific card in the following questions.
+ the questions about Neterion/Exar cards. If you say Y, you will be
+ asked for your specific card in the following questions.
-if NET_VENDOR_EXAR
+if NET_VENDOR_NETERION
config S2IO
- tristate "Exar Xframe 10Gb Ethernet Adapter"
+ tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+ These were originally released by S2IO, which renamed itself
+ Neterion, so the adapters might be labeled as either one, depending
+ on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
@@ -29,11 +32,13 @@ config S2IO
will be called s2io.
config VXGE
- tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
+ tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter"
depends on PCI
---help---
This driver supports Exar Corp's X3100 Series 10 GbE PCIe
- I/O Virtualized Server Adapter.
+ I/O Virtualized Server Adapter. These were originally released by
+ Neterion, which was later acquired by Exar, so the adapters might be
+ labeled as either one, depending on their age.
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
@@ -50,4 +55,4 @@ config VXGE_DEBUG_TRACE_ALL
the vxge driver. By default only few debug trace statements are
enabled.
-endif # NET_VENDOR_EXAR
+endif # NET_VENDOR_NETERION
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 358ed6118881..a2c0a93ca8b6 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -14,7 +14,6 @@
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include "vxge-traffic.h"
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 1decf3a1cad3..e57d23746585 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
return NFP_REPR_TYPE_VF;
}
- return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
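+ /* Return an out-of-range type so callers can detect the error */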
+ return __NFP_REPR_TYPE_MAX;
}
static struct net_device *
@@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
u8 port = 0;
repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
+ if (repr_type > NFP_REPR_TYPE_MAX)
+ return NULL;
reprs = rcu_dereference(app->reprs[repr_type]);
if (!reprs)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 78afe75129ab..382bb93cb090 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.dst_ipv4 = flow->daddr;
/* If entry has expired send dst IP with all other fields 0. */
- if (!(neigh->nud_state & NUD_VALID)) {
+ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 152283d7e59c..4a540c5e27fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -236,16 +236,20 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
int err;
pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
- if (!err)
- return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err) {
+ /* For backwards compatibility, allow any setting if symbol not found */
+ pf->limit_vfs = ~0;
+ if (err == -ENOENT)
+ return 0;
- pf->limit_vfs = ~0;
- /* Allow any setting for backwards compatibility if symbol not found */
- if (err == -ENOENT)
- return 0;
+ nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
+ return err;
+ }
- nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
- return err;
+ err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
+ if (err)
+ nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err);
+ return 0;
}
static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 8970ec981e11..439e6ffe2f05 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -250,7 +250,7 @@ struct nfp_net_tx_ring {
struct nfp_net_tx_desc *txds;
dma_addr_t dma;
- unsigned int size;
+ size_t size;
bool is_xdp;
} ____cacheline_aligned;
@@ -350,9 +350,9 @@ struct nfp_net_rx_buf {
* @qcp_fl: Pointer to base of the QCP freelist queue
* @rxbufs: Array of transmitted FL/RX buffers
* @rxds: Virtual address of FL/RX ring in host memory
+ * @xdp_rxq: RX-ring info avail for XDP
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
- * @xdp_rxq: RX-ring info avail for XDP
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -364,14 +364,15 @@ struct nfp_net_rx_ring {
u32 idx;
int fl_qcidx;
- unsigned int size;
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
- dma_addr_t dma;
struct xdp_rxq_info xdp_rxq;
+
+ dma_addr_t dma;
+ size_t size;
} ____cacheline_aligned;
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index cf1704e972b7..7c1a921d178d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -53,6 +53,8 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/mm.h>
+#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
@@ -1120,7 +1122,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p++;
}
- memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+ memset(tx_ring->txds, 0, tx_ring->size);
tx_ring->wr_p = 0;
tx_ring->rd_p = 0;
tx_ring->qcp_rd_p = 0;
@@ -1300,7 +1302,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
rx_ring->rxbufs[last_idx].dma_addr = 0;
rx_ring->rxbufs[last_idx].frag = NULL;
- memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+ memset(rx_ring->rxds, 0, rx_ring->size);
rx_ring->wr_p = 0;
rx_ring->rd_p = 0;
}
@@ -2126,7 +2128,7 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- kfree(tx_ring->txbufs);
+ kvfree(tx_ring->txbufs);
if (tx_ring->txds)
dma_free_coherent(dp->dev, tx_ring->size,
@@ -2150,18 +2152,17 @@ static int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- int sz;
tx_ring->cnt = dp->txd_cnt;
- tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
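+ /* array_size() saturates instead of wrapping on overflow */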
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->txds)
goto err_alloc;
- sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
- tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
+ tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+ GFP_KERNEL);
if (!tx_ring->txbufs)
goto err_alloc;
@@ -2275,7 +2276,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
if (dp->netdev)
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- kfree(rx_ring->rxbufs);
+ kvfree(rx_ring->rxbufs);
if (rx_ring->rxds)
dma_free_coherent(dp->dev, rx_ring->size,
@@ -2298,7 +2299,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- int sz, err;
+ int err;
if (dp->netdev) {
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
@@ -2308,14 +2309,14 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
}
rx_ring->cnt = dp->rxd_cnt;
- rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
+ rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->rxds)
goto err_alloc;
- sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
- rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
+ rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
+ GFP_KERNEL);
if (!rx_ring->rxbufs)
goto err_alloc;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7cbd0174459c..1d9b0d44ddb6 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5777,7 +5777,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
(np->rx_ring_size +
np->tx_ring_size),
&np->ring_addr,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!np->rx_ring.orig)
goto out_unmap;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -5786,7 +5786,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
sizeof(struct ring_desc_ex) *
(np->rx_ring_size +
np->tx_ring_size),
- &np->ring_addr, GFP_ATOMIC);
+ &np->ring_addr, GFP_KERNEL);
if (!np->rx_ring.ex)
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index b5b5ff725426..f1977aa440e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1531,7 +1531,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
}
/* CM PF */
-void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
/* XCM pure-LB queue */
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 12b4c2ab5796..d02e774c8d66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -867,7 +867,7 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
return rc;
}
-void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
+static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
{
struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
void *cookie = hwfn->cdev->ops_cookie;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 4340c4c90bcb..1aa9fc1c5890 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7838,8 +7838,8 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
-int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
- enum qed_nvm_images image_id, u32 *length)
+static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id, u32 *length)
{
struct qed_nvm_image_att image_att;
int rc;
@@ -7854,8 +7854,9 @@ int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
- u32 *num_dumped_bytes, enum qed_nvm_images image_id)
+static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes,
+ enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
&cdev->hwfns[cdev->dbg_params.engine_for_debug];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e5249b4741d0..6a0b46f214f4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -230,12 +230,12 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
}
/* Getters for resource amounts necessary for qm initialization */
-u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
{
return p_hwfn->hw_info.num_hw_tc;
}
-u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
{
return IS_QED_SRIOV(p_hwfn->cdev) ?
p_hwfn->cdev->p_iov_info->total_vfs : 0;
@@ -243,7 +243,7 @@ u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
#define NUM_DEFAULT_RLS 1
-u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
{
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
@@ -261,7 +261,7 @@ u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
return num_pf_rls;
}
-u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -273,7 +273,7 @@ u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
}
/* calc amount of PQs according to the requested flags */
-u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
{
u32 pq_flags = qed_get_pq_flags(p_hwfn);
@@ -507,16 +507,6 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}
-u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
-{
- u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
-
- if (rl > max_rl)
- DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
-
- return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
-}
-
/* Functions for creating specific types of pqs */
static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d845badf9b90..d6430dfebd83 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1225,19 +1225,6 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
0);
}
-void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
-{
- u32 rfs_cm_hdr_event_id;
-
- /* Set RFS event ID to be awakened i Tstorm By Prs */
- rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
-}
-
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 pf_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index c0d4a54a5edb..1135387bd99d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -873,8 +873,8 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
-void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_conn *p_conn)
+static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
{
qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 90a2b53096e2..17f3dfa2cc94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -377,7 +377,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
}
}
-const char *iwarp_state_names[] = {
+static const char *iwarp_state_names[] = {
"IDLE",
"RTS",
"TERMINATE",
@@ -942,7 +942,7 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
-void
+static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct mpa_v2_hdr *mpa_v2_params;
@@ -967,7 +967,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
mpa_data_size;
}
-void
+static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct qed_iwarp_cm_event_params params;
@@ -2500,7 +2500,7 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
/* The only slowpath for iwarp ll2 is unalign flush. When this completion
* is received, need to reset the FPDU.
*/
-void
+static void
qed_iwarp_ll2_slowpath(void *cxt,
u8 connection_handle,
u32 opaque_data_0, u32 opaque_data_1)
@@ -2803,8 +2803,9 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
-void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, u8 fw_return_code)
+static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ u8 fw_return_code)
{
struct qed_iwarp_cm_event_params params;
@@ -2824,8 +2825,9 @@ void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
ep->event_cb(ep->cb_context, &params);
}
-void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
- struct qed_iwarp_ep *ep, int fw_ret_code)
+static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ int fw_ret_code)
{
struct qed_iwarp_cm_event_params params;
bool event_cb = false;
@@ -2954,7 +2956,7 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
}
}
-void
+static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
struct qed_iwarp_ep *ep, u8 fw_return_code)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 99973e10b179..5ede6408649d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
+ u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
- memset(bins, 0, sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ memset(bins, 0, sizeof(bins));
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport
*/
if (p_filter_cmd->opcode == QED_FILTER_ADD) {
for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
- u32 bit;
+ u32 bit, nbits;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- __set_bit(bit, bins);
+ nbits = sizeof(u32) * BITS_PER_BYTE;
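+ /* Set the bit in a u32 array rather than an unsigned long
+ * bitmap, so the layout feeding cpu_to_le32() below is the
+ * same on all hosts
+ */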
+ bins[bit / nbits] |= 1 << (bit % nbits);
}
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
- u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
- p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
}
}
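The qed_l2.c hunk above drops __set_bit() on an unsigned long array in favour of explicit u32 word arithmetic, so the bin layout no longer depends on the host's word size or endianness. A minimal standalone sketch of the same indexing (hypothetical names, not the driver code):

#include <stdint.h>
#include <stdio.h>

#define NUM_BINS   256              /* matches the 256 approx bins */
#define BIN_WORDS  (NUM_BINS / 32)  /* 8 x u32, identical on any host */

static void set_bin(uint32_t bins[BIN_WORDS], unsigned int bit)
{
	bins[bit / 32] |= 1u << (bit % 32);
}

int main(void)
{
	uint32_t bins[BIN_WORDS] = { 0 };

	set_bin(bins, 69);                            /* word 2, bit 5 */
	printf("bins[2] = 0x%x\n", (unsigned)bins[2]); /* prints 0x20 */
	return 0;
}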
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 806a8da257e9..8d80f1095d17 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
- unsigned long bins[8];
+ u32 bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 012973d75ad0..14ac9cab2653 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -158,7 +158,8 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+static void qed_ll2b_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_buffer *buffer = data->cookie;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 15f1b3294289..8e4f60e4520a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -570,12 +570,13 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
+static int
+qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
struct qed_mcp_mb_params mb_params;
int rc;
@@ -1211,6 +1212,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
break;
default:
p_link->speed = 0;
+ p_link->link_up = 0;
}
if (p_link->link_up && p_link->speed)
@@ -1308,9 +1310,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
- if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
- if (params->eee.enable)
- phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+
+ /* There are MFWs that advertise this capability regardless of whether
+ * it is actually feasible. And given that at the very least adv_caps
+ * would be set internally by qed, we want to make sure LFA would
+ * still work.
+ */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
if (params->eee.tx_lpi_enable)
phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
if (params->eee.adv_caps & QED_EEE_1G_ADV)
@@ -3001,7 +3009,7 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
return rc;
}
-int
+static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_lock_params *p_params)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 101d677114f2..be941cfaa2d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -134,7 +134,7 @@ static bool qed_bmap_is_empty(struct qed_bmap *bmap)
return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
@@ -706,7 +706,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index b5ce1581645f..ada4c1810864 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -157,7 +157,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index fd59cf45f4be..9b08a9d9e151 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -672,8 +672,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
-bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
- int vfid, bool b_fail_malicious)
+static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+ int vfid, bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -687,7 +687,7 @@ bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
return true;
}
-bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
p_data->update_approx_mcast_flg = 1;
memcpy(p_data->bins, p_mcast_tlv->bins,
- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
@@ -3979,7 +3979,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
+static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
int i;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 2d7fcd6a0777..3d4269659820 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -169,7 +169,7 @@ static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
p_qid_tlv->qid = p_cid->qid_usage_idx;
}
-int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
+static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
resp_size += sizeof(struct pfvf_def_resp_tlv);
memcpy(p_mcast_tlv->bins, p_params->bins,
- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
}
update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
u32 bit;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- __set_bit(bit, sp_params.bins);
+ sp_params.bins[bit / 32] |= 1 << (bit % 32);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 4f05d5eb3cf5..033409db86ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
- u64 bins[8];
+ /* There are only 256 approx bins, and in HSI they're divided into
+ * 32-bit values. As old VFs used to set bits in these values on their
+ * side, the upper half of the array is never expected to contain any
+ * data.
+ */
+ u64 bins[4];
+ u64 obsolete_bins[4];
};
struct vfpf_vport_update_accept_param_tlv {
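A quick size check on the layout described in the comment above; the constants mirror the qed HSI values, and the assert is illustrative only, not driver code:

#include <stdint.h>

#define ETH_MULTICAST_MAC_BINS          256
#define ETH_MULTICAST_MAC_BINS_IN_REGS  (ETH_MULTICAST_MAC_BINS / 32)

/* 8 x u32 == 32 bytes == 4 x u64, so bins[4] carries every real bin
 * and obsolete_bins[4] only preserves the old TLV's wire size.
 */
_Static_assert(ETH_MULTICAST_MAC_BINS_IN_REGS * sizeof(uint32_t) ==
	       4 * sizeof(uint64_t), "bins[4] covers all 256 bins");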
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 49a6e25ddc2b..8ea1fa36ca43 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7396,8 +7396,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
- /* override BIOS settings, use userspace tools to enable WOL */
- __rtl8169_set_wol(tp, 0);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
mutex_init(&tp->wk.mutex);
u64_stats_init(&tp->rx_stats.syncp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d9e60cfd8a85..9d104a05044d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -53,7 +53,7 @@
#include "dwmac1000.h"
#include "hwif.h"
-#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 8d375e51a526..6a393b16a1fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
/* Enable pci device */
- ret = pcim_enable_device(pdev);
+ ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
static void stmmac_pci_remove(struct pci_dev *pdev)
{
stmmac_dvr_remove(&pdev->dev);
+ pci_disable_device(pdev);
}
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
+static int stmmac_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int stmmac_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
/* synthetic ID, no official vendor */
#define PCI_VENDOR_ID_STMMAC 0x700
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 171abcfb6184..f051ce35a440 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -565,40 +565,28 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
-#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \
- do { \
- if (!cpsw->data.dual_emac) \
- break; \
- if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
- ndev = cpsw->slaves[0].ndev; \
- skb->dev = ndev; \
- } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
- ndev = cpsw->slaves[1].ndev; \
- skb->dev = ndev; \
- } \
- } while (0)
-#define cpsw_add_mcast(cpsw, priv, addr) \
- do { \
- if (cpsw->data.dual_emac) { \
- struct cpsw_slave *slave = cpsw->slaves + \
- priv->emac_port; \
- int slave_port = cpsw_get_slave_port( \
- slave->slave_num); \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- 1 << slave_port | ALE_PORT_HOST, \
- ALE_VLAN, slave->port_vlan, 0); \
- } else { \
- cpsw_ale_add_mcast(cpsw->ale, addr, \
- ALE_ALL_PORTS, \
- 0, 0, 0); \
- } \
- } while (0)
-
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
+static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (cpsw->data.dual_emac) {
+ struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
+ int slave_port = cpsw_get_slave_port(slave->slave_num);
+
+ cpsw_ale_add_mcast(cpsw->ale, addr,
+ 1 << slave_port | ALE_PORT_HOST,
+ ALE_VLAN, slave->port_vlan, 0);
+ return;
+ }
+
+ cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
+}
+
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -706,7 +694,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
+ cpsw_add_mcast(priv, ha->addr);
}
}
}
@@ -798,10 +786,16 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct sk_buff *skb = token;
struct sk_buff *new_skb;
struct net_device *ndev = skb->dev;
- int ret = 0;
+ int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
+ if (cpsw->data.dual_emac) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port) {
+ ndev = cpsw->slaves[--port].ndev;
+ skb->dev = ndev;
+ }
+ }
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
/* In dual emac mode check for all interfaces */
@@ -3287,7 +3281,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 16c3bfbe1992..757a3b37ae8a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -218,6 +218,7 @@ issue:
ret = of_mdiobus_register(bus, np1);
if (ret) {
mdiobus_free(bus);
+ lp->mii_bus = NULL;
return ret;
}
return 0;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4b6e308199d2..a32ded5b4f41 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -873,6 +873,17 @@ struct netvsc_ethtool_stats {
unsigned long wake_queue;
};
+struct netvsc_ethtool_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 vf_rx_packets;
+ u64 vf_rx_bytes;
+ u64 vf_tx_packets;
+ u64 vf_tx_bytes;
+};
+
struct netvsc_vf_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4f40a04194..20275d1e6f9a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1118,6 +1118,64 @@ static void netvsc_get_vf_stats(struct net_device *net,
}
}
+static void netvsc_get_pcpu_stats(struct net_device *net,
+ struct netvsc_ethtool_pcpu_stats *pcpu_tot)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+ int i;
+
+ /* fetch percpu stats of vf */
+ for_each_possible_cpu(i) {
+ const struct netvsc_vf_pcpu_stats *stats =
+ per_cpu_ptr(ndev_ctx->vf_stats, i);
+ struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ this_tot->vf_rx_packets = stats->rx_packets;
+ this_tot->vf_tx_packets = stats->tx_packets;
+ this_tot->vf_rx_bytes = stats->rx_bytes;
+ this_tot->vf_tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ this_tot->rx_packets = this_tot->vf_rx_packets;
+ this_tot->tx_packets = this_tot->vf_tx_packets;
+ this_tot->rx_bytes = this_tot->vf_rx_bytes;
+ this_tot->tx_bytes = this_tot->vf_tx_bytes;
+ }
+
+ /* fetch percpu stats of netvsc */
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+ const struct netvsc_stats *stats;
+ struct netvsc_ethtool_pcpu_stats *this_tot =
+ &pcpu_tot[nvchan->channel->target_cpu];
+ u64 packets, bytes;
+ unsigned int start;
+
+ stats = &nvchan->tx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ this_tot->tx_bytes += bytes;
+ this_tot->tx_packets += packets;
+
+ stats = &nvchan->rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ this_tot->rx_bytes += bytes;
+ this_tot->rx_packets += packets;
+ }
+}
+
static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
@@ -1215,6 +1273,23 @@ static const struct {
{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
+}, pcpu_stats[] = {
+ { "cpu%u_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
+ { "cpu%u_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
+ { "cpu%u_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
+ { "cpu%u_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
+ { "cpu%u_vf_rx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
+ { "cpu%u_vf_rx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
+ { "cpu%u_vf_tx_packets",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
+ { "cpu%u_vf_tx_bytes",
+ offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
@@ -1226,6 +1301,9 @@ static const struct {
#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
+/* statistics per CPU (rx/tx packets/bytes, including VF traffic) */
+#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
+
/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
@@ -1241,7 +1319,8 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set)
case ETH_SS_STATS:
return NETVSC_GLOBAL_STATS_LEN
+ NETVSC_VF_STATS_LEN
- + NETVSC_QUEUE_STATS_LEN(nvdev);
+ + NETVSC_QUEUE_STATS_LEN(nvdev)
+ + NETVSC_PCPU_STATS_LEN;
default:
return -EINVAL;
}
@@ -1255,9 +1334,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
const void *nds = &ndc->eth_stats;
const struct netvsc_stats *qstats;
struct netvsc_vf_pcpu_stats sum;
+ struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
- int i, j;
+ int i, j, cpu;
if (!nvdev)
return;
@@ -1289,6 +1369,19 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = packets;
data[i++] = bytes;
}
+
+ pcpu_sum = kvmalloc_array(num_possible_cpus(),
+ sizeof(struct netvsc_ethtool_pcpu_stats),
+ GFP_KERNEL);
+ if (!pcpu_sum)
+ return;
+
+ netvsc_get_pcpu_stats(dev, pcpu_sum);
+ for_each_present_cpu(cpu) {
+ struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
+
+ for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
+ data[i++] = *(u64 *)((void *)this_sum
+ + pcpu_stats[j].offset);
+ }
+ kvfree(pcpu_sum);
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1296,7 +1389,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
u8 *p = data;
- int i;
+ int i, cpu;
if (!nvdev)
return;
@@ -1324,6 +1417,13 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
+ for_each_present_cpu(cpu) {
+ for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
+ sprintf(p, pcpu_stats[i].name, cpu);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
break;
}
}
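Every reader in the netvsc hunks above snapshots counters with the u64_stats_fetch_begin_irq()/..._fetch_retry_irq() pair. A simplified, single-threaded userspace stand-in for that retry pattern (illustrative only; the kernel primitives add the required memory barriers):

#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
	unsigned int seq;          /* odd while a writer is active */
	uint64_t packets, bytes;
};

static void snapshot(const struct pcpu_stats *s, uint64_t *p, uint64_t *b)
{
	unsigned int start;

	do {
		start = s->seq;    /* u64_stats_fetch_begin_irq() */
		*p = s->packets;
		*b = s->bytes;
		/* retry if a writer was active or slipped in between */
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct pcpu_stats s = { .seq = 2, .packets = 10, .bytes = 1500 };
	uint64_t p, b;

	snapshot(&s, &p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}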
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index d00d42c845b7..7ae1856d1f18 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -220,14 +220,14 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu)
struct net_device *primary_dev, *standby_dev;
int ret = 0;
- primary_dev = rcu_dereference(nfo_info->primary_dev);
+ primary_dev = rtnl_dereference(nfo_info->primary_dev);
if (primary_dev) {
ret = dev_set_mtu(primary_dev, new_mtu);
if (ret)
return ret;
}
- standby_dev = rcu_dereference(nfo_info->standby_dev);
+ standby_dev = rtnl_dereference(nfo_info->standby_dev);
if (standby_dev) {
ret = dev_set_mtu(standby_dev, new_mtu);
if (ret) {
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
index ba663e5af168..5135fc371f01 100644
--- a/drivers/net/netdevsim/devlink.c
+++ b/drivers/net/netdevsim/devlink.c
@@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns)
struct net *net = nsim_to_net(ns);
bool *reg_devlink = net_generic(net, nsim_devlink_id);
+ devlink_resources_unregister(ns->devlink, NULL);
devlink_unregister(ns->devlink);
devlink_free(ns->devlink);
ns->devlink = NULL;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 2d244551298b..8d8e2b3f263e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -41,6 +41,9 @@ struct nsim_vf_config {
static u32 nsim_dev_id;
+static struct dentry *nsim_ddir;
+static struct dentry *nsim_sdev_ddir;
+
static int nsim_num_vf(struct device *dev)
{
struct netdevsim *ns = to_nsim(dev);
@@ -566,9 +569,6 @@ static struct rtnl_link_ops nsim_link_ops __read_mostly = {
.dellink = nsim_dellink,
};
-struct dentry *nsim_ddir;
-struct dentry *nsim_sdev_ddir;
-
static int __init nsim_module_init(void)
{
int err;
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 02be199eb005..384c254fafc5 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -103,9 +103,6 @@ struct netdevsim {
struct nsim_ipsec ipsec;
};
-extern struct dentry *nsim_ddir;
-extern struct dentry *nsim_sdev_ddir;
-
#ifdef CONFIG_BPF_SYSCALL
int nsim_bpf_init(struct netdevsim *ns);
void nsim_bpf_uninit(struct netdevsim *ns);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1cd439bdf608..f7c69ca34056 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -679,7 +679,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- mdelay(500);
+ msleep(500);
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0831b7142df7..c017486e9b86 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -13,7 +13,7 @@
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
-
+#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/of_mdio.h>
@@ -22,7 +22,14 @@
#include <linux/mdio-mux.h>
#include <linux/delay.h>
-#define MDIO_PARAM_OFFSET 0x00
+#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
+#define MDIO_RATE_ADJ_INT_OFFSET 0x004
+#define MDIO_RATE_ADJ_DIVIDENT_SHIFT 16
+
+#define MDIO_SCAN_CTRL_OFFSET 0x008
+#define MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR 28
+
+#define MDIO_PARAM_OFFSET 0x23c
#define MDIO_PARAM_MIIM_CYCLE 29
#define MDIO_PARAM_INTERNAL_SEL 25
#define MDIO_PARAM_BUS_ID 22
@@ -30,27 +37,56 @@
#define MDIO_PARAM_PHY_ID 16
#define MDIO_PARAM_PHY_DATA 0
-#define MDIO_READ_OFFSET 0x04
+#define MDIO_READ_OFFSET 0x240
#define MDIO_READ_DATA_MASK 0xffff
-#define MDIO_ADDR_OFFSET 0x08
+#define MDIO_ADDR_OFFSET 0x244
-#define MDIO_CTRL_OFFSET 0x0C
+#define MDIO_CTRL_OFFSET 0x248
#define MDIO_CTRL_WRITE_OP 0x1
#define MDIO_CTRL_READ_OP 0x2
-#define MDIO_STAT_OFFSET 0x10
+#define MDIO_STAT_OFFSET 0x24c
#define MDIO_STAT_DONE 1
#define BUS_MAX_ADDR 32
#define EXT_BUS_START_ADDR 16
+#define MDIO_REG_ADDR_SPACE_SIZE 0x250
+
+#define MDIO_OPERATING_FREQUENCY 11000000
+#define MDIO_RATE_ADJ_DIVIDENT 1
+
struct iproc_mdiomux_desc {
void *mux_handle;
void __iomem *base;
struct device *dev;
struct mii_bus *mii_bus;
+ struct clk *core_clk;
};
+static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
+{
+ u32 divisor;
+ u32 val;
+
+ /* Disable external mdio master access */
+ val = readl(md->base + MDIO_SCAN_CTRL_OFFSET);
+ val |= BIT(MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR);
+ writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);
+
+ if (md->core_clk) {
+ /* use rate adjust regs to derive the mdio's operating
+ * frequency from the specified core clock
+ */
+ divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
+ divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1);
+ val = divisor;
+ val |= MDIO_RATE_ADJ_DIVIDENT << MDIO_RATE_ADJ_DIVIDENT_SHIFT;
+ writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET);
+ writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET);
+ }
+}
+
static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
{
unsigned int timeout = 1000; /* loop for 1s */
@@ -169,18 +205,39 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
md->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res->start & 0xfff) {
+ /* For backward compatibility in case the
+ * base address is specified with an offset.
+ */
+ dev_info(&pdev->dev, "fix base address in dt-blob\n");
+ res->start &= ~0xfff;
+ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
+ }
md->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(md->base)) {
dev_err(&pdev->dev, "failed to ioremap register\n");
return PTR_ERR(md->base);
}
- md->mii_bus = mdiobus_alloc();
+ md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
return -ENOMEM;
}
+ md->core_clk = devm_clk_get(&pdev->dev, NULL);
+ if (md->core_clk == ERR_PTR(-ENOENT) ||
+ md->core_clk == ERR_PTR(-EINVAL))
+ md->core_clk = NULL;
+ else if (IS_ERR(md->core_clk))
+ return PTR_ERR(md->core_clk);
+
+ rc = clk_prepare_enable(md->core_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to enable core clk\n");
+ return rc;
+ }
+
bus = md->mii_bus;
bus->priv = md;
bus->name = "iProc MDIO mux bus";
@@ -194,7 +251,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
rc = mdiobus_register(bus);
if (rc) {
dev_err(&pdev->dev, "mdiomux registration failed\n");
- goto out;
+ goto out_clk;
}
platform_set_drvdata(pdev, md);
@@ -206,27 +263,55 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
goto out_register;
}
+ mdio_mux_iproc_config(md);
+
dev_info(md->dev, "iProc mdiomux registered\n");
return 0;
out_register:
mdiobus_unregister(bus);
-out:
- mdiobus_free(bus);
+out_clk:
+ clk_disable_unprepare(md->core_clk);
return rc;
}
static int mdio_mux_iproc_remove(struct platform_device *pdev)
{
- struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
mdio_mux_uninit(md->mux_handle);
mdiobus_unregister(md->mii_bus);
- mdiobus_free(md->mii_bus);
+ clk_disable_unprepare(md->core_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mdio_mux_iproc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(md->core_clk);
return 0;
}
+static int mdio_mux_iproc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+
+ clk_prepare_enable(md->core_clk);
+ mdio_mux_iproc_config(md);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mdio_mux_iproc_pm_ops,
+ mdio_mux_iproc_suspend, mdio_mux_iproc_resume);
+
static const struct of_device_id mdio_mux_iproc_match[] = {
{
.compatible = "brcm,mdio-mux-iproc",
@@ -239,6 +324,7 @@ static struct platform_driver mdiomux_iproc_driver = {
.driver = {
.name = "mdio-mux-iproc",
.of_match_table = mdio_mux_iproc_match,
+ .pm = &mdio_mux_iproc_pm_ops,
},
.probe = mdio_mux_iproc_probe,
.remove = mdio_mux_iproc_remove,
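To make the rate-adjust math in mdio_mux_iproc_config() above concrete, assume a hypothetical 250 MHz core clock (the divisor semantics come from the iProc MDIO block; the clock value is illustrative):

	divisor = 250000000 / 11000000                     = 22
	divisor = 22 / (MDIO_RATE_ADJ_DIVIDENT + 1)        = 11
	val     = 11 | (1 << MDIO_RATE_ADJ_DIVIDENT_SHIFT) = 0x1000b

so 0x1000b is written to both the internal and external rate-adjust registers.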
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 650c2667d523..84ca9ff40ae0 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -123,7 +123,7 @@ static const struct vsc8531_edge_rate_table edge_table[] = {
};
#endif /* CONFIG_OF_MDIO */
-static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
+static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page)
{
int rc;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d2baedc4ea91..1ee25877c4d1 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -471,8 +471,14 @@ static int phy_config_aneg(struct phy_device *phydev)
{
if (phydev->drv->config_aneg)
return phydev->drv->config_aneg(phydev);
- else
- return genphy_config_aneg(phydev);
+
+ /* Clause 45 PHYs that don't implement Clause 22 registers are not
+ * allowed to call genphy_config_aneg()
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EOPNOTSUPP;
+
+ return genphy_config_aneg(phydev);
}
/**
@@ -519,7 +525,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
* negotiation may already be done and aneg interrupt may not be
* generated.
*/
- if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+ if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
err = phy_aneg_done(phydev);
if (err > 0) {
trigger = true;
@@ -977,7 +983,7 @@ void phy_state_machine(struct work_struct *work)
needs_aneg = true;
break;
case PHY_NOLINK:
- if (phydev->irq != PHY_POLL)
+ if (!phy_polling_mode(phydev))
break;
err = phy_read_status(phydev);
@@ -1018,7 +1024,7 @@ void phy_state_machine(struct work_struct *work)
 /* Only register a CHANGE if we are polling and the link has changed
 * since the last check.
 */
- if (phydev->irq == PHY_POLL) {
+ if (phy_polling_mode(phydev)) {
old_link = phydev->link;
err = phy_read_status(phydev);
if (err)
@@ -1117,7 +1123,7 @@ void phy_state_machine(struct work_struct *work)
* PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
* between states from phy_mac_interrupt()
*/
- if (phydev->irq == PHY_POLL)
+ if (phy_polling_mode(phydev))
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
PHY_STATE_TIME * HZ);
}
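For reference, bit 0 of the Clause 45 devices-in-package bitmap (registers 1.5/1.6 in IEEE 802.3) advertises Clause 22 register access, which is what the new phy_config_aneg() test above keys off. A standalone restatement with a hypothetical helper:

#include <stdbool.h>
#include <stdint.h>

static bool c45_has_c22_regs(uint32_t devices_in_package)
{
	/* devices-in-package bit 0 == Clause 22 registers present */
	return devices_in_package & (1u << 0);
}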
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 6c7fd98cb00a..a205750b431b 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -96,7 +96,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
*/
struct ppp_mppe_state {
struct crypto_skcipher *arc4;
- struct crypto_ahash *sha1;
+ struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -136,25 +136,16 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
- AHASH_REQUEST_ON_STACK(req, state->sha1);
- struct scatterlist sg[4];
- unsigned int nbytes;
-
- sg_init_table(sg, 4);
-
- nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
- nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
- sizeof(sha_pad->sha_pad1));
- nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
- nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
- sizeof(sha_pad->sha_pad2));
-
- ahash_request_set_tfm(req, state->sha1);
- ahash_request_set_callback(req, 0, NULL, NULL);
- ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes);
-
- crypto_ahash_digest(req);
- ahash_request_zero(req);
+ crypto_shash_init(state->sha1);
+ crypto_shash_update(state->sha1, state->master_key,
+ state->keylen);
+ crypto_shash_update(state->sha1, sha_pad->sha_pad1,
+ sizeof(sha_pad->sha_pad1));
+ crypto_shash_update(state->sha1, state->session_key,
+ state->keylen);
+ crypto_shash_update(state->sha1, sha_pad->sha_pad2,
+ sizeof(sha_pad->sha_pad2));
+ crypto_shash_final(state->sha1, state->sha1_digest);
}
/*
@@ -200,6 +191,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
static void *mppe_alloc(unsigned char *options, int optlen)
{
struct ppp_mppe_state *state;
+ struct crypto_shash *shash;
unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
@@ -217,13 +209,21 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out_free;
}
- state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(state->sha1)) {
- state->sha1 = NULL;
+ shash = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(shash))
+ goto out_free;
+
+ state->sha1 = kmalloc(sizeof(*state->sha1) +
+ crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!state->sha1) {
+ crypto_free_shash(shash);
goto out_free;
}
+ state->sha1->tfm = shash;
+ state->sha1->flags = 0;
- digestsize = crypto_ahash_digestsize(state->sha1);
+ digestsize = crypto_shash_digestsize(shash);
if (digestsize < MPPE_MAX_KEY_LEN)
goto out_free;
@@ -246,7 +246,10 @@ static void *mppe_alloc(unsigned char *options, int optlen)
out_free:
kfree(state->sha1_digest);
- crypto_free_ahash(state->sha1);
+ if (state->sha1) {
+ crypto_free_shash(state->sha1->tfm);
+ kzfree(state->sha1);
+ }
crypto_free_skcipher(state->arc4);
kfree(state);
out:
@@ -261,7 +264,8 @@ static void mppe_free(void *arg)
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
if (state) {
kfree(state->sha1_digest);
- crypto_free_ahash(state->sha1);
+ crypto_free_shash(state->sha1->tfm);
+ kzfree(state->sha1);
crypto_free_skcipher(state->arc4);
kfree(state);
}
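The ppp_mppe conversion above moves from the scatterlist-based ahash API to the synchronous shash API, which needs no request object or completion handling. A self-contained sketch of that calling convention for a two-part SHA-1 digest, written against the 4.18-era kernel API (where shash_desc still had a flags field); the helper name is hypothetical, and unlike the patch it checks every return value:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sha1_digest_two_parts(const u8 *a, unsigned int alen,
				 const u8 *b, unsigned int blen, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
	struct shash_desc *desc;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* descriptor is allocated with room for the tfm's state */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, a, alen);
	if (!err)
		err = crypto_shash_update(desc, b, blen);
	if (!err)
		err = crypto_shash_final(desc, out);

	kzfree(desc);
	crypto_free_shash(tfm);
	return err;
}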
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0a3134712652..2bbefe828670 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -200,6 +200,7 @@ struct tun_flow_entry {
};
#define TUN_NUM_FLOW_ENTRIES 1024
+#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
struct tun_prog {
struct rcu_head rcu;
@@ -406,7 +407,7 @@ static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
static inline u32 tun_hashfn(u32 rxhash)
{
- return rxhash & 0x3ff;
+ return rxhash & TUN_MASK_FLOW_ENTRIES;
}
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
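The old literal 0x3ff happened to equal TUN_NUM_FLOW_ENTRIES - 1; deriving the mask from the define keeps the two in sync and documents that the table size must remain a power of two, since x & (N - 1) equals x % N only then. A standalone illustration with hypothetical names:

#include <assert.h>
#include <stdint.h>

#define NUM_ENTRIES  1024
#define MASK_ENTRIES (NUM_ENTRIES - 1)

static uint32_t hashfn(uint32_t rxhash)
{
	/* equivalent to rxhash % NUM_ENTRIES when NUM_ENTRIES is 2^n */
	return rxhash & MASK_ENTRIES;
}

int main(void)
{
	assert((NUM_ENTRIES & MASK_ENTRIES) == 0); /* power-of-two check */
	assert(hashfn(0x12345678) == 0x12345678 % NUM_ENTRIES);
	return 0;
}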
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index ac25981ee4d5..a9991c5f4736 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
mod_timer(&dev->stat_monitor,
jiffies + STAT_UPDATE_TIMER);
}
+
+ tasklet_schedule(&dev->bh);
}
return ret;
@@ -1647,7 +1649,7 @@ lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
struct lan78xx_net *dev = netdev_priv(netdev);
/* Read Device/MAC registers */
- for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
+ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
if (!netdev->phydev)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6514c86f043e..f4247b275e09 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1067,7 +1067,7 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg1d, 0);
set_register(pegasus, Reg7b, 1);
- mdelay(100);
+ msleep(100);
if ((pegasus->features & HAS_HOME_PNA) && mii_mode)
set_register(pegasus, Reg7b, 0);
else
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 38502809420b..cb0cc30c3d6a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1246,7 +1246,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
- {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 2d316c1b851b..6ac232e52bf7 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -358,7 +358,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
/* power up and reset phy */
sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
 /* at least 10ms, here 20ms to be safe */
- mdelay(20);
+ msleep(20);
sr_write_reg(dev, SR_PRR, 0);
 /* at least 1ms, here 2ms to ensure the right register is read */
udelay(2 * 1000);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2ff08bc103a9..62311dde6e71 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -82,25 +82,43 @@ struct virtnet_sq_stats {
struct u64_stats_sync syncp;
u64 packets;
u64 bytes;
+ u64 xdp_tx;
+ u64 xdp_tx_drops;
+ u64 kicks;
};
struct virtnet_rq_stats {
struct u64_stats_sync syncp;
u64 packets;
u64 bytes;
+ u64 drops;
+ u64 xdp_packets;
+ u64 xdp_tx;
+ u64 xdp_redirects;
+ u64 xdp_drops;
+ u64 kicks;
};
#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
- { "packets", VIRTNET_SQ_STAT(packets) },
- { "bytes", VIRTNET_SQ_STAT(bytes) },
+ { "packets", VIRTNET_SQ_STAT(packets) },
+ { "bytes", VIRTNET_SQ_STAT(bytes) },
+ { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
+ { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
+ { "kicks", VIRTNET_SQ_STAT(kicks) },
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
- { "packets", VIRTNET_RQ_STAT(packets) },
- { "bytes", VIRTNET_RQ_STAT(bytes) },
+ { "packets", VIRTNET_RQ_STAT(packets) },
+ { "bytes", VIRTNET_RQ_STAT(bytes) },
+ { "drops", VIRTNET_RQ_STAT(drops) },
+ { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
+ { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
+ { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
+ { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
+ { "kicks", VIRTNET_RQ_STAT(kicks) },
};
#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
@@ -447,22 +465,12 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
return 0;
}
-static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
- struct xdp_frame *xdpf)
+static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
{
- struct xdp_frame *xdpf_sent;
- struct send_queue *sq;
- unsigned int len;
unsigned int qp;
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
-
- /* Free up any pending old buffers before queueing new ones. */
- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
- xdp_return_frame(xdpf_sent);
-
- return __virtnet_xdp_xmit_one(vi, sq, xdpf);
+ return &vi->sq[qp];
}
static int virtnet_xdp_xmit(struct net_device *dev,
@@ -474,23 +482,28 @@ static int virtnet_xdp_xmit(struct net_device *dev,
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
- unsigned int qp;
int drops = 0;
- int err;
+ int kicks = 0;
+ int ret, err;
int i;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
+ sq = virtnet_xdp_sq(vi);
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
+ drops = n;
+ goto out;
+ }
 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
 * indicates XDP resources have been successfully allocated.
 */
xdp_prog = rcu_dereference(rq->xdp_prog);
- if (!xdp_prog)
- return -ENXIO;
+ if (!xdp_prog) {
+ ret = -ENXIO;
+ drops = n;
+ goto out;
+ }
/* Free up any pending old buffers before queueing new ones. */
while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
@@ -505,11 +518,20 @@ static int virtnet_xdp_xmit(struct net_device *dev,
drops++;
}
}
+ ret = n - drops;
- if (flags & XDP_XMIT_FLUSH)
- virtqueue_kick(sq->vq);
+ if (flags & XDP_XMIT_FLUSH) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
+ kicks = 1;
+ }
+out:
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.xdp_tx += n;
+ sq->stats.xdp_tx_drops += drops;
+ sq->stats.kicks += kicks;
+ u64_stats_update_end(&sq->stats.syncp);
- return n - drops;
+ return ret;
}
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@ -586,7 +608,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct receive_queue *rq,
void *buf, void *ctx,
unsigned int len,
- unsigned int *xdp_xmit)
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
@@ -601,6 +624,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
int err;
len -= vi->hdr_len;
+ stats->bytes += len;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -642,6 +666,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ stats->xdp_packets++;
switch (act) {
case XDP_PASS:
@@ -650,11 +675,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
len = xdp.data_end - xdp.data;
break;
case XDP_TX:
+ stats->xdp_tx++;
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_tx_xmit(vi, xdpf);
- if (unlikely(err)) {
+ err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+ if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
}
@@ -662,6 +688,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
+ stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err)
goto err_xdp;
@@ -670,6 +697,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
case XDP_DROP:
@@ -695,7 +723,8 @@ err:
err_xdp:
rcu_read_unlock();
- dev->stats.rx_dropped++;
+ stats->xdp_drops++;
+ stats->drops++;
put_page(page);
xdp_xmit:
return NULL;
@@ -705,18 +734,20 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
void *buf,
- unsigned int len)
+ unsigned int len,
+ struct virtnet_rq_stats *stats)
{
struct page *page = buf;
struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+ stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
goto err;
return skb;
err:
- dev->stats.rx_dropped++;
+ stats->drops++;
give_pages(rq, page);
return NULL;
}
@@ -727,7 +758,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
void *buf,
void *ctx,
unsigned int len,
- unsigned int *xdp_xmit)
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -740,6 +772,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
int err;
head_skb = NULL;
+ stats->bytes += len - vi->hdr_len;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -788,6 +821,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ stats->xdp_packets++;
switch (act) {
case XDP_PASS:
@@ -812,11 +846,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
break;
case XDP_TX:
+ stats->xdp_tx++;
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_tx_xmit(vi, xdpf);
- if (unlikely(err)) {
+ err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+ if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
if (unlikely(xdp_page != page))
put_page(xdp_page);
@@ -828,6 +863,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
+ stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err) {
if (unlikely(xdp_page != page))
@@ -841,8 +877,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
+ /* fall through */
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
@@ -877,6 +915,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_buf;
}
+ stats->bytes += len;
page = virt_to_head_page(buf);
truesize = mergeable_ctx_to_truesize(ctx);
@@ -922,6 +961,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
err_xdp:
rcu_read_unlock();
+ stats->xdp_drops++;
err_skb:
put_page(page);
while (num_buf-- > 1) {
@@ -932,24 +972,25 @@ err_skb:
dev->stats.rx_length_errors++;
break;
}
+ stats->bytes += len;
page = virt_to_head_page(buf);
put_page(page);
}
err_buf:
- dev->stats.rx_dropped++;
+ stats->drops++;
dev_kfree_skb(head_skb);
xdp_xmit:
return NULL;
}
-static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx,
- unsigned int *xdp_xmit)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, unsigned int len, void **ctx,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
- int ret;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -961,23 +1002,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
} else {
put_page(virt_to_head_page(buf));
}
- return 0;
+ return;
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+ stats);
else if (vi->big_packets)
- skb = receive_big(dev, vi, rq, buf, len);
+ skb = receive_big(dev, vi, rq, buf, len, stats);
else
- skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
+ skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
if (unlikely(!skb))
- return 0;
+ return;
hdr = skb_vnet_hdr(skb);
- ret = skb->len;
-
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -994,12 +1034,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
ntohs(skb->protocol), skb->len, skb->pkt_type);
napi_gro_receive(&rq->napi, skb);
- return ret;
+ return;
frame_err:
dev->stats.rx_frame_errors++;
dev_kfree_skb(skb);
- return 0;
}
/* Unlike mergeable buffers, all buffers are allocated to the
@@ -1166,7 +1205,12 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
if (err)
break;
} while (rq->vq->num_free);
- virtqueue_kick(rq->vq);
+ if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.kicks++;
+ u64_stats_update_end(&rq->stats.syncp);
+ }
+
return !oom;
}
@@ -1241,22 +1285,24 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
unsigned int *xdp_xmit)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
- unsigned int len, received = 0, bytes = 0;
+ struct virtnet_rq_stats stats = {};
+ unsigned int len;
void *buf;
+ int i;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
- while (received < budget &&
+ while (stats.packets < budget &&
(buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
- bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
- received++;
+ receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+ stats.packets++;
}
} else {
- while (received < budget &&
+ while (stats.packets < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
- received++;
+ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+ stats.packets++;
}
}
@@ -1266,11 +1312,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
u64_stats_update_begin(&rq->stats.syncp);
- rq->stats.bytes += bytes;
- rq->stats.packets += received;
+ for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ size_t offset = virtnet_rq_stats_desc[i].offset;
+ u64 *item;
+
+ item = (u64 *)((u8 *)&rq->stats + offset);
+ *item += *(u64 *)((u8 *)&stats + offset);
+ }
u64_stats_update_end(&rq->stats.syncp);
- return received;
+ return stats.packets;
}
static void free_old_xmit_skbs(struct send_queue *sq)
@@ -1326,7 +1377,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
struct virtnet_info *vi = rq->vq->vdev->priv;
struct send_queue *sq;
- unsigned int received, qp;
+ unsigned int received;
unsigned int xdp_xmit = 0;
virtnet_poll_cleantx(rq);
@@ -1341,10 +1392,12 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
xdp_do_flush_map();
if (xdp_xmit & VIRTIO_XDP_TX) {
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
- smp_processor_id();
- sq = &vi->sq[qp];
- virtqueue_kick(sq->vq);
+ sq = virtnet_xdp_sq(vi);
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
}
return received;
@@ -1506,8 +1559,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (kick || netif_xmit_stopped(txq))
- virtqueue_kick(sq->vq);
+ if (kick || netif_xmit_stopped(txq)) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.kicks++;
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ }
return NETDEV_TX_OK;
}
@@ -1611,7 +1669,7 @@ static void virtnet_stats(struct net_device *dev,
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- u64 tpackets, tbytes, rpackets, rbytes;
+ u64 tpackets, tbytes, rpackets, rbytes, rdrops;
struct receive_queue *rq = &vi->rq[i];
struct send_queue *sq = &vi->sq[i];
@@ -1625,17 +1683,18 @@ static void virtnet_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
rpackets = rq->stats.packets;
rbytes = rq->stats.bytes;
+ rdrops = rq->stats.drops;
} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
tot->rx_bytes += rbytes;
tot->tx_bytes += tbytes;
+ tot->rx_dropped += rdrops;
}
tot->tx_dropped = dev->stats.tx_dropped;
tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
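The per-field loop at the end of virtnet_receive() above folds a local stats snapshot into the queue totals by walking the same descriptor table that names the ethtool strings, so adding a counter means touching exactly one table. A standalone sketch of that offsetof() technique (hypothetical names, not the driver code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rq_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t drops;
};

static const struct {
	const char *name;
	size_t offset;
} rq_stats_desc[] = {
	{ "packets", offsetof(struct rq_stats, packets) },
	{ "bytes",   offsetof(struct rq_stats, bytes)   },
	{ "drops",   offsetof(struct rq_stats, drops)   },
};

static void accumulate(struct rq_stats *total, const struct rq_stats *delta)
{
	size_t i;

	for (i = 0; i < sizeof(rq_stats_desc) / sizeof(rq_stats_desc[0]); i++) {
		size_t off = rq_stats_desc[i].offset;

		/* same offset indexes both structs, field by field */
		*(uint64_t *)((uint8_t *)total + off) +=
			*(const uint64_t *)((const uint8_t *)delta + off);
	}
}

int main(void)
{
	struct rq_stats total = { 0 }, delta = { .packets = 3, .bytes = 180 };

	accumulate(&total, &delta);
	printf("%llu packets\n", (unsigned long long)total.packets);
	return 0;
}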
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 093bd21f574d..4907453f17f5 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
case 0x001:
printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
break;
- case 0x010:
+ case 0x002:
printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
break;
default:
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 3ed3d9f6aae9..74538085cfb7 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1399,6 +1399,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
return 0;
}
+#ifdef CONFIG_PROC_FS
static int atmel_proc_show(struct seq_file *m, void *v)
{
struct atmel_private *priv = m->private;
@@ -1481,6 +1482,7 @@ static int atmel_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Current state:\t\t%s\n", s);
return 0;
}
+#endif
static const struct net_device_ops atmel_netdev_ops = {
.ndo_open = atmel_open,
@@ -3675,7 +3677,7 @@ static int probe_atmel_card(struct net_device *dev)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
- mdelay(500);
+ msleep(500);
if (atmel_read16(dev, MR2) == 0) {
/* No stored firmware so load a small stub which just
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 4db4d444407a..8347da632a5b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -93,6 +93,42 @@ static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
}
#endif /* DEBUG */
+struct brcmf_feat_fwfeat {
+ const char * const fwid;
+ u32 feat_flags;
+};
+
+static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = {
+ /* brcmfmac43602-pcie.ap.bin from linux-firmware.git commit ea1178515b88 */
+ { "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) },
+ /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */
+ { "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) },
+};
+
+static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
+{
+ const struct brcmf_feat_fwfeat *e;
+ u32 feat_flags = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_feat_fwfeat_map); i++) {
+ e = &brcmf_feat_fwfeat_map[i];
+ if (!strcmp(e->fwid, drv->fwver)) {
+ feat_flags = e->feat_flags;
+ break;
+ }
+ }
+
+ if (!feat_flags)
+ return;
+
+ for (i = 0; i < BRCMF_FEAT_LAST; i++)
+ if (feat_flags & BIT(i))
+ brcmf_dbg(INFO, "enabling firmware feature: %s\n",
+ brcmf_feat_names[i]);
+ drv->feat_flags |= feat_flags;
+}
+
/**
* brcmf_feat_iovar_int_get() - determine feature through iovar query.
*
@@ -253,6 +289,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
}
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+ brcmf_feat_firmware_overrides(drvr);
+
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 45928b5b8d97..4fffa6988087 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
- fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus);
+ /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
+ fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
return fwreq;
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 72046e182745..04dd7a936593 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3419,7 +3419,7 @@ done:
static void airo_handle_tx(struct airo_info *ai, u16 status)
{
- int i, len = 0, index = -1;
+ int i, index = -1;
u16 fid;
if (test_bit(FLAG_MPI, &ai->flags)) {
@@ -3443,11 +3443,9 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
fid = IN4500(ai, TXCOMPLFID);
- for(i = 0; i < MAX_FIDS; i++) {
- if ((ai->fids[i] & 0xffff) == fid) {
- len = ai->fids[i] >> 16;
+ for (i = 0; i < MAX_FIDS; i++) {
+ if ((ai->fids[i] & 0xffff) == fid)
index = i;
- }
}
if (index != -1) {
diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c
index d9ed22b4cc6b..3718f958c0fc 100644
--- a/drivers/net/wireless/cisco/airo_cs.c
+++ b/drivers/net/wireless/cisco/airo_cs.c
@@ -102,11 +102,8 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int airo_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int ret;
- dev = link->priv;
-
dev_dbg(&link->dev, "airo_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 1ad83ef5f202..910db46db6a1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
.host_command_length = ETH_ALEN
};
int err;
- int len;
IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
- len = ETH_ALEN;
/* The Firmware currently ignores the BSSID and just disassociates from
* the currently associated AP -- but in the off chance that a future
* firmware does use the BSSID provided here, we go ahead and try and
@@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
struct libipw_device *ieee = priv->ieee;
struct lib80211_crypt_data *crypt;
struct iw_param *param = &wrqu->param;
- int ret = 0;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
@@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
/*
* wpa_supplicant will control these internally
*/
- ret = -EOPNOTSUPP;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
@@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
{
struct ipw2100_priv *priv = libipw_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- __le16 reason;
-
- reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index dd29f46d086b..d32d39fa2686 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
- struct lib80211_crypt_data *crypt;
struct libipw_security *sec = &ieee->sec;
LIBIPW_DEBUG_WX("GET_ENCODE\n");
@@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
} else
key = ieee->crypt_info.tx_keyidx;
- crypt = ieee->crypt_info.crypt[key];
erq->flags = key + 1;
if (!sec->enabled) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 62a9794f952b..57e3b6cca234 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il,
int txq_id = skb_get_queue_mapping(skb);
u16 len, idx, hdr_len;
u16 firstlen, secondlen;
- u8 id;
- u8 unicast;
u8 sta_id;
u8 tid = 0;
__le16 fc;
@@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il,
goto drop_unlock;
}
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
fc = hdr->frame_control;
#ifdef CONFIG_IWLEGACY_DEBUG
@@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il)
struct list_head *element;
struct il_rx_buf *rxb;
unsigned long flags;
- int write;
spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
@@ -2725,7 +2718,6 @@ void
il3945_post_associate(struct il_priv *il)
{
int rc = 0;
- struct ieee80211_conf *conf = NULL;
if (!il->vif || !il->is_open)
return;
@@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il)
il_scan_cancel_timeout(il, 200);
- conf = &il->hw->conf;
-
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
il3945_commit_rxon(il);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index dbf164d48ed3..3e568ce2fb20 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
{
struct il_channel_info *ch_info;
s8 max_power;
- u8 a_band;
u8 i;
if (il->tx_power_user_lmt == power) {
@@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
for (i = 0; i < il->channel_count; i++) {
ch_info = &il->channel_info[i];
- a_band = il_is_channel_a_band(ch_info);
/* find minimum power of all user and regulatory constraints
* (does not consider h/w clipping limitations) */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 562e94870a9c..280cd8ae1696 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
u32 *accum_stats;
u32 *delta, *max_delta;
struct stats_general_common *general, *accum_general;
- struct stats_tx *tx, *accum_tx;
prev_stats = (__le32 *) &il->_4965.stats;
accum_stats = (u32 *) &il->_4965.accum_stats;
size = sizeof(struct il_notif_stats);
general = &il->_4965.stats.general.common;
accum_general = &il->_4965.accum_stats.general.common;
- tx = &il->_4965.stats.tx;
- accum_tx = &il->_4965.accum_stats.tx;
delta = (u32 *) &il->_4965.delta_stats;
max_delta = (u32 *) &il->_4965.max_delta;
@@ -4784,7 +4781,6 @@ static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
struct il_priv *il = context;
- struct il_ucode_header *ucode;
int err;
struct il4965_firmware_pieces pieces;
const unsigned int api_max = il->cfg->ucode_api_max;
@@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
- ucode = (struct il_ucode_header *)ucode_raw->data;
-
err = il4965_load_firmware(il, ucode_raw, &pieces);
if (err)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 4d08d78c6b71..04e376cc898c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,13 +7,13 @@ iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index a63ca8820568..fedb108db68f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -63,6 +63,7 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d4ba66aecdc9..91ca77c7571c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -59,7 +59,7 @@
#define IWL_22000_UCODE_API_MAX 38
/* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN 24
+#define IWL_22000_UCODE_API_MIN 39
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
@@ -73,29 +73,48 @@
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
-#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
+ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_22000 10
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
.num_of_queues = 512,
+ .max_tfd_queue_size = 256,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_base_params iwl_22560_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .num_of_queues = 512,
+ .max_tfd_queue_size = 65536,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
-#define IWL_DEVICE_22000 \
+#define IWL_DEVICE_22000_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_22000, \
- .base_params = &iwl_22000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
.non_shared_ant = ANT_A, \
@@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
+ .ht_params = &iwl_22000_ht_params, \
+ .nvm_ver = IWL_22000_NVM_VERSION, \
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.use_tfh = true, \
.rf_id = true, \
.gen2 = true, \
@@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dbgc_supported = true, \
.min_umac_error_event_table = 0x400000
+#define IWL_DEVICE_22500 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22000, \
+ .base_params = &iwl_22000_base_params, \
+ .csr = &iwl_csr_v1
+
+#define IWL_DEVICE_22560 \
+ IWL_DEVICE_22000_COMMON, \
+ .device_family = IWL_DEVICE_FAMILY_22560, \
+ .base_params = &iwl_22560_base_params, \
+ .csr = &iwl_csr_v2
+
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
.cdb = true,
};
const struct iwl_cfg iwl22000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_JF_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_F0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
- IWL_DEVICE_22000,
- .csr = &iwl_csr_v1,
- .ht_params = &iwl_22000_ht_params,
- .nvm_ver = IWL_22000_NVM_VERSION,
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
+ .name = "Intel(R) Dual Band Wireless AX 22560",
+ .fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
+ IWL_DEVICE_22560,
+ .cdb = true,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
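
The two base_params variants differ mainly in max_tfd_queue_size: 256 on the existing 22000 parts versus 65536 on 22560, whose TFD queues use 16-bit indices. Both values are powers of two, so ring indices can wrap with a simple mask; a sketch under that assumption (the in-tree wrap helper is parameterized differently, this standalone form is only for illustration):

    /* Sketch: wrap a TFD queue index when max_tfd_queue_size is a
     * power of two (256 on legacy devices, 65536 on 22560). */
    static inline int queue_inc_wrap(int index, int max_tfd_queue_size)
    {
            return (index + 1) & (max_tfd_queue_size - 1);
    }
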
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index a224f1be1ec2..36151e61a26f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -53,6 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index dbcec7ce7863..b5d8274761d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -72,6 +72,7 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 69bfa827e82a..a62c8346f13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -123,6 +123,7 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 7262e973e0d6..c46fa712985b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -104,6 +104,7 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e20c30b29c03..24b2f7cbb308 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -95,6 +95,7 @@
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
+ .max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -178,6 +179,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
+const struct iwl_cfg iwl9260_killer_2ac_cfg = {
+ .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
+ .fw_name_pre = IWL9260A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260A_FW_PRE,
@@ -267,6 +279,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.soc_latency = 5000,
};
+const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
+ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000A_FW_PRE,
@@ -327,6 +367,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
+const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
+ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 007bfe7656a4..08d3d8a190f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -187,20 +189,4 @@ struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
-/**
- * struct iwl_fseq_ver_mismatch_nty - Notification about version
- *
- * This notification does not have a direct impact on the init flow.
- * It means that another core (not WiFi) has initiated the FSEQ flow
- * and updated the FSEQ version. The driver only prints an error when
- * this occurs.
- *
- * @aux_read_fseq_ver: auxiliary read FSEQ version
- * @wifi_fseq_ver: FSEQ version (embedded in WiFi)
- */
-struct iwl_fseq_ver_mismatch_ntf {
- __le32 aux_read_fseq_ver;
- __le32 wifi_fseq_ver;
-} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
-
#endif /* __iwl_fw_api_alive_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index f285bacc8726..6dad748e5cdc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -193,7 +193,8 @@ enum iwl_legacy_cmds {
FW_GET_ITEM_CMD = 0x1a,
/**
- * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
+ * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+ * &struct iwl_tx_cmd_gen3,
* response in &struct iwl_mvm_tx_resp or
* &struct iwl_mvm_tx_resp_v3
*/
@@ -646,13 +647,6 @@ enum iwl_system_subcmd_ids {
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
-
- /**
- * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
- * mismatch during init. The format is specified in
- * &struct iwl_fseq_ver_mismatch_ntf.
- */
- FSEQ_VER_MISMATCH_NTF = 0xFF,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 5f6e855006dd..59b3c6e8f37b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +85,16 @@ enum iwl_data_path_subcmd_ids {
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
+ * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+ */
+ STA_HE_CTXT_CMD = 0x7,
+
+ /**
+ * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+ */
+ RFH_QUEUE_CONFIG_CMD = 0xD,
+
+ /**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
*/
TLC_MNG_CONFIG_CMD = 0xF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index f2e31e040a7b..55594c93b014 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -279,6 +281,10 @@ enum iwl_mac_filter_flags {
MAC_FILTER_OUT_BCAST = BIT(8),
MAC_FILTER_IN_CRC32 = BIT(11),
MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
+ /**
+ * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
+ */
+ MAC_FILTER_IN_11AX = BIT(14),
};
/**
@@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif {
__le32 num_recvd_beacons;
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+/**
+ * struct iwl_he_backoff_conf - used for backoff configuration
+ * One instance per trigger-based AC (set by the MU EDCA Parameter Set
+ * info element); used for backoff configuration of trigger-based TXF5..TXF8.
+ * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via
+ * trigger-based TX.
+ * @cwmin: CW min
+ * @cwmax: CW max
+ * @aifsn: AIFSN
+ *	AIFSN=0 means that no backoff from the specified trigger-based AC
+ *	is allowed until the MU-TIMER reaches 0
+ * @mu_time: MU time in 8TU units
+ */
+struct iwl_he_backoff_conf {
+ __le16 cwmin;
+ __le16 cwmax;
+ __le16 aifsn;
+ __le16 mu_time;
+} __packed; /* AC_QOS_DOT11AX_API_S */
+
+#define MAX_HE_SUPP_NSS 2
+#define MAX_HE_CHANNEL_BW_INDX 4
+
+/**
+ * struct iwl_he_pkt_ext - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *	For each Nss/BW, define 2 QAM thresholds (0..5)
+ *	For rates below low_th, no PPE is needed
+ *	For rates between low_th and high_th, 8us PPE is needed
+ *	For rates equal to or higher than high_th, 16us PPE is needed
+ * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S */
+
+/**
+ * enum iwl_he_sta_ctxt_flags - HE STA context flags
+ * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
+ * control frames such as TRIG, NDPA, BACK)
+ * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
+ * color for RX filter but use MAC header
+ * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
+ * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
+ * of 32-bits
+ * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
+ * and should be used
+ * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
+ * is enabled according to UORA element existence
+ * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
+ * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
+ * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
+ * parameter set, i.e. the backoff counters for trig-based ACs
+ */
+enum iwl_he_sta_ctxt_flags {
+ STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
+ STA_CTXT_HE_BSS_COLOR_DIS = BIT(5),
+ STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6),
+ STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7),
+ STA_CTXT_HE_PACKET_EXT = BIT(8),
+ STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9),
+ STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
+ STA_CTXT_HE_ACK_ENABLED = BIT(11),
+ STA_CTXT_HE_MU_EDCA_CW = BIT(12),
+};
+
+/**
+ * enum iwl_he_htc_flags - HE HTC support flags
+ * @IWL_HE_HTC_SUPPORT: HE-HTC support
+ * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
+ * support via A-control field
+ * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
+ * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
+ * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
+ */
+enum iwl_he_htc_flags {
+ IWL_HE_HTC_SUPPORT = BIT(0),
+ IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3),
+ IWL_HE_HTC_BSR_SUPP = BIT(4),
+ IWL_HE_HTC_OMI_SUPP = BIT(5),
+ IWL_HE_HTC_BQR_SUPP = BIT(6),
+};
+
+/*
+ * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
+ * response to HE MRQ and if the STA provides unsolicited HE MFB
+ */
+#define IWL_HE_HTC_LINK_ADAP_POS (1)
+#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0)
+#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
+#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
+
+/**
+ * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max number of "open" MSDUs in the receiver (as a power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter-BSS frames
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+
#endif /* __iwl_fw_api_mac_h__ */
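
Two short sketches, not part of the patch, to make the new HE API concrete. The first derives the packet-extension duration from the packed QAM thresholds exactly as the struct iwl_he_pkt_ext kernel-doc describes; the second shows how a caller might send the command under the STA_HE_CTXT_CMD id added to the data-path group earlier in this patch. iwl_mvm_send_cmd_pdu() and iwl_cmd_id() exist elsewhere in the driver; the helper names and default field values are assumptions:

    /* PPE lookup per the struct iwl_he_pkt_ext comment: qam_tx is
     * 0..5 (BPSK..1024QAM), nss is 0 (SISO) or 1 (MIMO2), bw_idx 0..3. */
    static int he_ppe_us(const struct iwl_he_pkt_ext *ext,
                         int nss, int bw_idx, int qam_tx)
    {
            u8 low_th = ext->pkt_ext_qam_th[nss][bw_idx][0];
            u8 high_th = ext->pkt_ext_qam_th[nss][bw_idx][1];

            if (qam_tx < low_th)
                    return 0;       /* QAM_tx < QAM_th1 */
            if (qam_tx < high_th)
                    return 8;       /* QAM_th1 <= QAM_tx < QAM_th2 */
            return 16;              /* QAM_th2 <= QAM_tx */
    }

    /* Hypothetical caller: populate and send the HE STA context. */
    static int send_he_sta_ctxt(struct iwl_mvm *mvm, u8 sta_id)
    {
            struct iwl_he_sta_context_cmd cmd = {
                    .sta_id = sta_id,
                    .tid_limit = 8,         /* full multi-TID aggregation */
                    .flags = cpu_to_le32(STA_CTXT_HE_PACKET_EXT),
            };

            return iwl_mvm_send_cmd_pdu(mvm,
                                        iwl_cmd_id(STA_HE_CTXT_CMD,
                                                   DATA_PATH_GROUP, 0),
                                        0, sizeof(cmd), &cmd);
    }
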
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 8d6dc9189985..6c5338364794 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general {
* @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
* @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
* @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
- * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
* @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
* @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
* @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
@@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags {
NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
+ /**
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ */
NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 21e13a315421..087fae91baef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -314,8 +314,11 @@ enum {
IWL_RATE_MCS_8_INDEX,
IWL_RATE_MCS_9_INDEX,
IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+ IWL_RATE_MCS_10_INDEX,
+ IWL_RATE_MCS_11_INDEX,
+ IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
- IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
+ IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -440,8 +443,8 @@ enum {
#define RATE_LEGACY_RATE_MSK 0xff
/* Bit 10 - OFDM HE */
-#define RATE_MCS_OFDM_HE_POS 10
-#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS)
+#define RATE_MCS_HE_POS 10
+#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -482,15 +485,33 @@ enum {
#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
/*
- * Bit 20-21: HE guard interval and LTF type.
- * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
- * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 & SGI (bit 13) clear 4xLTF+3.2us
+ * 3 & SGI (bit 13) set 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * 3 (does not occur)
*/
#define RATE_MCS_HE_GI_LTF_POS 20
#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
#define RATE_MCS_HE_TYPE_POS 22
+#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
@@ -501,6 +522,9 @@ enum {
#define RATE_MCS_LDPC_POS 27
#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS 28
+#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
/* Link Quality definitions */
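
With the HE type and 106-tone bits in place, classifying a rate_n_flags word is a pair of mask tests; a sketch (the helper name is hypothetical):

    /* Sketch: is this an HE trigger-based rate? */
    static bool rate_is_he_trig(u32 rate_n_flags)
    {
            return (rate_n_flags & RATE_MCS_HE_MSK) &&
                   (rate_n_flags & RATE_MCS_HE_TYPE_MSK) ==
                                           RATE_MCS_HE_TYPE_TRIG;
    }
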
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 7e570c4a9df0..2f599353c885 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -343,6 +345,169 @@ enum iwl_rx_mpdu_mac_info {
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
};
+/*
+ * enum iwl_rx_he_phy - HE PHY data
+ */
+enum iwl_rx_he_phy {
+ IWL_RX_HE_PHY_BEAM_CHNG = BIT(0),
+ IWL_RX_HE_PHY_UPLINK = BIT(1),
+ IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc,
+ IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00,
+ IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12),
+ IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000,
+ IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20),
+ IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000,
+ IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23),
+ IWL_RX_HE_PHY_DOPPLER = BIT(24),
+ /* 6 bits reserved */
+ IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
+
+ /* second dword - MU data */
+ IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0),
+ IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
+ IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
+ IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
+ /* trigger encoded */
+ IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
+ IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL,
+ /* 1 bit reserved */
+ IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21),
+ IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
+ /* 8 bits reserved */
+};
+
+/**
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v1 {
+ /* DW7 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW8 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW9 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW10 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW11 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW12 & DW13 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v3 {
+ /* DW7 - carries filter_match only when rpa_en == 1 */
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+ /* DW8 - carries rss_hash only when rpa_en == 1 */
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+ /* DW9 */
+ /**
+ * @partial_hash: 31:0 ip/tcp header hash
+ * w/o some fields (such as IP SRC addr)
+ */
+ __le32 partial_hash;
+ /* DW10 */
+ /**
+ * @raw_xsum: raw xsum value
+ */
+ __le32 raw_xsum;
+ /* DW11 */
+ /**
+ * @rate_n_flags: RX rate/flags encoding
+ */
+ __le32 rate_n_flags;
+ /* DW12 */
+ /**
+ * @energy_a: energy chain A
+ */
+ u8 energy_a;
+ /**
+ * @energy_b: energy chain B
+ */
+ u8 energy_b;
+ /**
+ * @channel: channel number
+ */
+ u8 channel;
+ /**
+ * @mac_context: MAC context mask
+ */
+ u8 mac_context;
+ /* DW13 */
+ /**
+ * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+ */
+ __le32 gp2_on_air_rise;
+ /* DW14 & DW15 */
+ union {
+ /**
+ * @tsf_on_air_rise:
+ * TSF value on air rise (INA), only valid if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ __le64 tsf_on_air_rise;
+ /**
+ * @he_phy_data:
+ * HE PHY data, see &enum iwl_rx_he_phy, valid
+ * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+ */
+ __le64 he_phy_data;
+ };
+ /* DW16 & DW17 */
+ /**
+ * @reserved: reserved
+ */
+ __le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
*/
@@ -400,51 +565,14 @@ struct iwl_rx_mpdu_desc {
* @reorder_data: &enum iwl_rx_mpdu_reorder_data
*/
__le32 reorder_data;
- /* DW7 - carries rss_hash only when rpa_en == 1 */
- /**
- * @rss_hash: RSS hash value
- */
- __le32 rss_hash;
- /* DW8 - carries filter_match only when rpa_en == 1 */
- /**
- * @filter_match: filter match value
- */
- __le32 filter_match;
- /* DW9 */
- /**
- * @rate_n_flags: RX rate/flags encoding
- */
- __le32 rate_n_flags;
- /* DW10 */
- /**
- * @energy_a: energy chain A
- */
- u8 energy_a;
- /**
- * @energy_b: energy chain B
- */
- u8 energy_b;
- /**
- * @channel: channel number
- */
- u8 channel;
- /**
- * @mac_context: MAC context mask
- */
- u8 mac_context;
- /* DW11 */
- /**
- * @gp2_on_air_rise: GP2 timer value on air rise (INA)
- */
- __le32 gp2_on_air_rise;
- /* DW12 & DW13 */
- /**
- * @tsf_on_air_rise:
- * TSF value on air rise (INA), only valid if
- * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
- */
- __le64 tsf_on_air_rise;
-} __packed;
+
+ union {
+ struct iwl_rx_mpdu_desc_v1 v1;
+ struct iwl_rx_mpdu_desc_v3 v3;
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
struct iwl_frame_release {
u8 baid;
@@ -587,4 +715,36 @@ struct iwl_ba_window_status_notif {
__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+/**
+ * struct iwl_rfh_queue_data - per-queue RX queue configuration
+ * @q_num: Q num
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of the RB status write pointer
+ * @fr_bd_cb: DMA address of the free RB table
+ * @ur_bd_cb: DMA address of the used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+ u8 q_num;
+ u8 enable;
+ __le16 reserved;
+ __le64 urbd_stts_wrptr;
+ __le64 fr_bd_cb;
+ __le64 ur_bd_cb;
+ __le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: per-queue configuration, see &struct iwl_rfh_queue_data
+ */
+struct iwl_rfh_queue_config {
+ u8 num_queues;
+ u8 reserved[3];
+ struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
+
#endif /* __iwl_fw_api_rx_h__ */
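
Two sketches around the new RX structures (illustrative, not from the patch). The v1/v3 union together with IWL_RX_DESC_SIZE_V1 lets a caller size the descriptor per device family; the helper names are hypothetical, and IWL_DEVICE_FAMILY_22560 comes from the cfg changes earlier in this series. struct iwl_rfh_queue_config ends in a flexible data[] array, so the RFH_QUEUE_CONFIG_CMD payload is sized by queue count:

    /* Descriptor size per family: v1 layouts stop at the end of the
     * union's v1 member; 22560 uses the full v3 struct. */
    static inline size_t rx_mpdu_desc_size(const struct iwl_cfg *cfg)
    {
            if (cfg->device_family >= IWL_DEVICE_FAMILY_22560)
                    return sizeof(struct iwl_rx_mpdu_desc);
            return IWL_RX_DESC_SIZE_V1;
    }

    /* Allocate the variable-length queue-config command. */
    static struct iwl_rfh_queue_config *alloc_rfh_cmd(u8 num_queues)
    {
            struct iwl_rfh_queue_config *cmd;
            size_t len = sizeof(*cmd) + num_queues * sizeof(cmd->data[0]);

            cmd = kzalloc(len, GFP_KERNEL);
            if (!cmd)
                    return NULL;
            cmd->num_queues = num_queues;
            return cmd;
    }
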
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index a2a40b515a3c..514b86123d3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_7 */
+/**
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @ttl: time to live - packet lifetime limit; the FW should drop the
+ *	frame once this limit has passed
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen3 {
+ __le16 len;
+ __le16 flags;
+ __le32 offload_assist;
+ struct iwl_dram_sec_info dram_info;
+ __le32 rate_n_flags;
+ __le64 ttl;
+ struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_8 */
+
/*
* TX response related data
*/
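
Relative to iwl_tx_cmd_gen2, the gen3 layout widens offload_assist to 32 bits and adds the dram_info block plus a 64-bit ttl. A sketch of a transmit path picking the layout; the family check mirrors the 22560 cfg split and is an assumption here:

    /* Sketch: choose the TX command size per device family. */
    static size_t tx_cmd_size(const struct iwl_cfg *cfg)
    {
            if (cfg->device_family >= IWL_DEVICE_FAMILY_22560)
                    return sizeof(struct iwl_tx_cmd_gen3);
            return sizeof(struct iwl_tx_cmd_gen2);
    }
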
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
deleted file mode 100644
index 6f75985eea66..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "iwl-drv.h"
-#include "runtime.h"
-#include "fw/api/commands.h"
-#include "fw/api/alive.h"
-
-static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
-
- IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
- __le32_to_cpu(fseq->aux_read_fseq_ver),
- __le32_to_cpu(fseq->wifi_fseq_ver));
-}
-
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
-
- switch (cmd) {
- case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
- iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
- break;
- default:
- break;
- }
-}
-IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa283285fcbe..a31a42e673c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
- /* Pull RXF1 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
- /* Pull RXF2 */
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
- RXF_DIFF_FROM_PREV, 1);
- /* Pull LMAC2 RXF1 */
- if (fwrt->smem_cfg.num_lmacs > 1)
- iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
- LMAC2_PRPH_OFFSET, 2);
-
- /* Pull TXF data from LMAC1 */
- for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
- /* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
- iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
- 0, i);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+ /* Pull RXF1 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[0].rxfifo1_size, 0, 0);
+ /* Pull RXF2 */
+ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+ RXF_DIFF_FROM_PREV, 1);
+ /* Pull LMAC2 RXF1 */
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ iwl_fwrt_dump_rxf(fwrt, dump_data,
+ cfg->lmac[1].rxfifo1_size,
+ LMAC2_PRPH_OFFSET, 2);
}
- /* Pull TXF data from LMAC2 */
- if (fwrt->smem_cfg.num_lmacs > 1) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ /* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(fwrt->trans,
- TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
- i);
+ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
iwl_fwrt_dump_txf(fwrt, dump_data,
- cfg->lmac[1].txfifo_size[i],
- LMAC2_PRPH_OFFSET,
- i + cfg->num_txfifo_entries);
+ cfg->lmac[0].txfifo_size[i], 0, i);
+ }
+
+ /* Pull TXF data from LMAC2 */
+ if (fwrt->smem_cfg.num_lmacs > 1) {
+ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+ i++) {
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(fwrt->trans,
+ TXF_LARC_NUM +
+ LMAC2_PRPH_OFFSET, i);
+ iwl_fwrt_dump_txf(fwrt, dump_data,
+ cfg->lmac[1].txfifo_size[i],
+ LMAC2_PRPH_OFFSET,
+ i + cfg->num_txfifo_entries);
+ }
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
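
The pattern in the rest of this file is the same throughout: each dump section is gated on one bit of fwrt->fw->dbg_dump_mask, a firmware-provided bitmask over the IWL_FW_ERROR_DUMP_* section types. The checks are open-coded in the patch; a helper one could factor out (not present in the patch) would read:

    static bool dump_type_enabled(const struct iwl_fw_runtime *fwrt,
                                  enum iwl_fw_error_dump_type type)
    {
            return fwrt->fw->dbg_dump_mask & BIT(type);
    }
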
@@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
fifo_data_len = 0;
- /* Count RXF2 size */
- if (mem_cfg->rxfifo2_size) {
- /* Add header info */
- fifo_data_len += mem_cfg->rxfifo2_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
-
- /* Count RXF1 sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- if (!mem_cfg->lmac[i].rxfifo1_size)
- continue;
-
- /* Add header info */
- fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
- /* Count TXF sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- int j;
+ /* Count RXF2 size */
+ if (mem_cfg->rxfifo2_size) {
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->rxfifo2_size +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
- for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
- if (!mem_cfg->lmac[i].txfifo_size[j])
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
fifo_data_len +=
- mem_cfg->lmac[i].txfifo_size[j] +
+ mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
- if (fw_has_capa(&fwrt->fw->ucode_capa,
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ size_t fifo_const_len = sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries;
+ j++) {
+ if (!mem_cfg->lmac[i].txfifo_size[j])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ fifo_const_len +
+ mem_cfg->lmac[i].txfifo_size[j];
+ }
+ }
+ }
+
+ if ((fwrt->fw->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Make room for PRPH registers */
- if (!fwrt->trans->cfg->gen2) {
+ if (!fwrt->trans->cfg->gen2 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
i++) {
/* The range includes both boundaries */
@@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
if (!fwrt->trans->cfg->gen2 &&
- fwrt->trans->cfg->mq_rx_supported) {
+ fwrt->trans->cfg->mq_rx_supported &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
for (i = 0; i <
ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
/* The range includes both boundaries */
@@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
}
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) +
- sizeof(*dump_data) * 3 +
- sizeof(*dump_smem_cfg) +
fifo_data_len +
prph_len +
- radio_len +
- sizeof(*dump_info);
-
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
- /* Make room for MEM segments */
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- le32_to_cpu(fw_dbg_mem[i].len);
+ radio_len;
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ file_len += sizeof(*dump_data) + sizeof(*dump_info);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+ file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ /* Make room for the SMEM, if it exists */
+ if (smem_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ smem_len;
+
+ /* Make room for the secondary SRAM, if it exists */
+ if (sram2_len)
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ sram2_len;
+
+ /* Make room for MEM segments */
+ for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ le32_to_cpu(fw_dbg_mem[i].len);
+ }
}
/* Make room for fw's virtual image pages, if it exists */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
file_len += fwrt->num_of_paging_blk *
@@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
- if (fwrt->dump.desc)
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
- if (!fwrt->fw->n_dbg_mem_tlv)
- file_len += sram_len + sizeof(*dump_mem);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
+ !fwrt->fw->n_dbg_mem_tlv)
+ file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
dump_file = vzalloc(file_len);
if (!dump_file) {
@@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
- dump_data->len = cpu_to_le32(sizeof(*dump_info));
- dump_info = (void *)dump_data->data;
- dump_info->device_family =
- fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
- dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
- memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
- sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
- sizeof(dump_info->dev_human_readable));
- strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
- sizeof(dump_info->bus_human_readable));
-
- dump_data = iwl_fw_error_next_data(dump_data);
-
- /* Dump shared memory configuration */
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
- dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
- dump_smem_cfg = (void *)dump_data->data;
- dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
- dump_smem_cfg->num_txfifo_entries =
- cpu_to_le32(mem_cfg->num_txfifo_entries);
- for (i = 0; i < MAX_NUM_LMAC; i++) {
- int j;
-
- for (j = 0; j < TX_FIFO_MAX_NUM; j++)
- dump_smem_cfg->lmac[i].txfifo_size[j] =
- cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
- dump_smem_cfg->lmac[i].rxfifo1_size =
- cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
- }
- dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
- dump_smem_cfg->internal_txfifo_addr =
- cpu_to_le32(mem_cfg->internal_txfifo_addr);
- for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
- dump_smem_cfg->internal_txfifo_size[i] =
- cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+ dump_data->len = cpu_to_le32(sizeof(*dump_info));
+ dump_info = (void *)dump_data->data;
+ dump_info->device_family =
+ fwrt->trans->cfg->device_family ==
+ IWL_DEVICE_FAMILY_7000 ?
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+ dump_info->hw_step =
+ cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+ memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+ sizeof(dump_info->fw_human_readable));
+ strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+ sizeof(dump_info->dev_human_readable) - 1);
+ strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+ sizeof(dump_info->bus_human_readable) - 1);
+
+ dump_data = iwl_fw_error_next_data(dump_data);
}
- dump_data = iwl_fw_error_next_data(dump_data);
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ /* Dump shared memory configuration */
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+ dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+ dump_smem_cfg = (void *)dump_data->data;
+ dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+ dump_smem_cfg->num_txfifo_entries =
+ cpu_to_le32(mem_cfg->num_txfifo_entries);
+ for (i = 0; i < MAX_NUM_LMAC; i++) {
+ int j;
+ u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+ for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+ dump_smem_cfg->lmac[i].txfifo_size[j] =
+ cpu_to_le32(txf_size[j]);
+ dump_smem_cfg->lmac[i].rxfifo1_size =
+ cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+ }
+ dump_smem_cfg->rxfifo2_size =
+ cpu_to_le32(mem_cfg->rxfifo2_size);
+ dump_smem_cfg->internal_txfifo_addr =
+ cpu_to_le32(mem_cfg->internal_txfifo_addr);
+ for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+ dump_smem_cfg->internal_txfifo_size[i] =
+ cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+ }
+
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
/* We only dump the FIFOs if the FW is in error state */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
@@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
iwl_read_radio_regs(fwrt, &dump_data);
}
- if (fwrt->dump.desc) {
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
fwrt->dump.desc->len);
@@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (monitor_dump_only)
goto dump_trans_data;
- if (!fwrt->fw->n_dbg_mem_tlv) {
+ if (!fwrt->fw->n_dbg_mem_tlv &&
+ fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
bool success;
+ if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
+ break;
+
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (smem_len) {
+ if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
@@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (sram2_len) {
+ if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
@@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Dump fw's virtual image */
- if (!fwrt->trans->cfg->gen2 &&
+ if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
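The hunks above apply one pattern throughout iwl_fw_error_dump(): every dump section is now gated on a bit in fw->dbg_dump_mask, once in the pass that computes file_len and once in the pass that fills the vzalloc()ed buffer. A minimal sketch of the idiom; the helper name is hypothetical and not part of the patch:

static inline bool iwl_fw_dump_type_enabled(const struct iwl_fw *fw,
					    u32 type)
{
	/* type is an IWL_FW_ERROR_DUMP_* value; the mask defaults to
	 * 0xffffffff (dump everything) when the firmware carries no
	 * IWL_UCODE_TLV_FW_DBG_DUMP_LST TLV. */
	return fw->dbg_dump_mask & BIT(type);
}

Both passes must test the same bit: if only the sizing pass tested it, the fill pass could overrun the buffer; if only the fill pass tested it, part of the allocation would go unused. That is why each section is patched in lockstep above.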
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9d939cbaf6c6..bbf2b265a06a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
+
+ /* TLVs 0x1000-0x2000 are for internal driver usage */
+ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
};
struct iwl_ucode_tlv {
@@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
- * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
-/**
- * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_fw_gscan_capabilities {
- __le32 max_scan_cache_size;
- __le32 max_scan_buckets;
- __le32 max_ap_cache_per_scan;
- __le32 max_rssi_sample_size;
- __le32 max_scan_reporting_threshold;
- __le32 max_hotlist_aps;
- __le32 max_significant_change_aps;
- __le32 max_bssid_history_entries;
- __le32 max_hotlist_ssids;
- __le32 max_number_epno_networks;
- __le32 max_number_epno_networks_by_ssid;
- __le32 max_number_of_white_listed_ssid;
- __le32 max_number_of_black_listed_ssid;
-} __packed;
-
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4912382b6af..0861b97c4233 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -193,41 +193,6 @@ struct iwl_fw_cscheme_list {
} __packed;
/**
- * struct iwl_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- * change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- * hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- * specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_gscan_capabilities {
- u32 max_scan_cache_size;
- u32 max_scan_buckets;
- u32 max_ap_cache_per_scan;
- u32 max_rssi_sample_size;
- u32 max_scan_reporting_threshold;
- u32 max_hotlist_aps;
- u32 max_significant_change_aps;
- u32 max_bssid_history_entries;
- u32 max_hotlist_ssids;
- u32 max_number_epno_networks;
- u32 max_number_epno_networks_by_ssid;
- u32 max_number_of_white_listed_ssid;
- u32 max_number_of_black_listed_ssid;
-};
-
-/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
@@ -298,7 +263,7 @@ struct iwl_fw {
size_t n_dbg_mem_tlv;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
- struct iwl_gscan_capabilities gscan_capa;
+ u32 dbg_dump_mask;
};
static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index d8db1dd100b0..ed23367f7088 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_cmd_buffer *rxb);
-
#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index fb4b6442b4d7..ff85d69c2a8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
return;
pkt = cmd.resp_pkt;
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c503b26793f6..12fddcf15bab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -93,6 +93,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
+ IWL_DEVICE_FAMILY_22560,
};
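Because this enum stays ordered by hardware generation, the rest of the series can convert equality tests into range tests (for example, smem.c and fw.c below change == IWL_DEVICE_FAMILY_22000 into >=). A hypothetical helper showing the idiom the ordering enables:

static inline bool iwl_device_is_22000_or_later(const struct iwl_cfg *cfg)
{
	/* Relies on enum iwl_device_family being ordered by generation,
	 * so this also covers IWL_DEVICE_FAMILY_22560 and any later
	 * additions. */
	return cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}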
/*
@@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
+ * @max_tfd_queue_size: max number of entries in tfd queue.
*/
struct iwl_base_params {
unsigned int wd_timeout;
@@ -191,6 +193,7 @@ struct iwl_base_params {
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
+ u32 max_tfd_queue_size; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
@@ -551,6 +554,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
@@ -558,17 +562,23 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
new file mode 100644
index 000000000000..ebea99189ca9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -0,0 +1,286 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_context_info_file_gen3_h__
+#define __iwl_context_info_file_gen3_h__
+
+#include "iwl-context-info.h"
+
+#define CSR_CTXT_INFO_BOOT_CTRL 0x0
+#define CSR_CTXT_INFO_ADDR 0x118
+#define CSR_IML_DATA_ADDR 0x120
+#define CSR_IML_SIZE_ADDR 0x128
+#define CSR_IML_RESP_ADDR 0x12c
+
+/* Set bit for enabling automatic function boot */
+#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
+/* Set bit for initiating function boot */
+#define CSR_AUTO_FUNC_INIT BIT(7)
+
+/**
+ * enum iwl_prph_scratch_mtr_format - tfd size configuration
+ * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+enum iwl_prph_scratch_mtr_format {
+ IWL_PRPH_MTR_FORMAT_16B = 0x0,
+ IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+ IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+ IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+};
+
+/**
+ * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ * in hwm config.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ * multicomm.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0 for
+ * completion descriptor, 1 for responses (legacy)
+ * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ * There are 4 possible values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ * 3: 256 bit.
+ */
+enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
+ IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
+ IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
+ IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
+ IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
+ IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
+ IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
+};
+
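These flags are destined for iwl_prph_scratch_control.control_flags, declared below. Note that the IWL_PRPH_MTR_FORMAT_* values above are pre-shifted into the IWL_PRPH_SCRATCH_MTR_FORMAT mask (bits 18-19: 0x40000 is BIT(18), 0xC0000 is both bits). A sketch of composing a value; the combination shown is illustrative, not taken from this patch:

	struct iwl_prph_scratch_control ctrl = {
		/* Illustrative only: 4K RBs, completion-descriptor
		 * mode, 256-bit TFD format. */
		.control_flags = cpu_to_le32(IWL_PRPH_SCRATCH_RB_SIZE_4K |
					     IWL_PRPH_SCRATCH_MTR_MODE |
					     IWL_PRPH_MTR_FORMAT_256B),
	};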
+/*
+ * struct iwl_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_version {
+ __le16 mac_id;
+ __le16 version;
+ __le16 size;
+ __le16 reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwl_prph_scratch_control - control structure
+ * @control_flags: context information flags see &enum iwl_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_control {
+ __le32 control_flags;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwl_prph_scratch_ror_cfg - ror config
+ * @ror_base_addr: ror start address
+ * @ror_size: ror size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_ror_cfg {
+ __le64 ror_base_addr;
+ __le32 ror_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_hwm_cfg {
+ __le64 hwm_base_addr;
+ __le32 hwm_size;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_rbd_cfg {
+ __le64 free_rbd_addr;
+ __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @ror_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwl_prph_scratch_ctrl_cfg {
+ struct iwl_prph_scratch_version version;
+ struct iwl_prph_scratch_control control;
+ struct iwl_prph_scratch_ror_cfg ror_cfg;
+ struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+ struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwl_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch {
+ struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+ __le32 reserved[16];
+ struct iwl_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwl_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwl_prph_info {
+ __le32 boot_stage_mirror;
+ __le32 ipc_status_mirror;
+ __le32 sleep_notif;
+ __le32 reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwl_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ * the capability CSR register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ * start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ * start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ * start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ * start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ * transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ * completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ * after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwl_context_info_gen3 {
+ __le16 version;
+ __le16 size;
+ __le32 config;
+ __le64 prph_info_base_addr;
+ __le64 cr_head_idx_arr_base_addr;
+ __le64 tr_tail_idx_arr_base_addr;
+ __le64 cr_tail_idx_arr_base_addr;
+ __le64 tr_head_idx_arr_base_addr;
+ __le16 cr_idx_arr_size;
+ __le16 tr_idx_arr_size;
+ __le64 mtr_base_addr;
+ __le64 mcr_base_addr;
+ __le16 mtr_size;
+ __le16 mcr_size;
+ __le16 mtr_doorbell_vec;
+ __le16 mcr_doorbell_vec;
+ __le16 mtr_msi_vec;
+ __le16 mcr_msi_vec;
+ u8 mtr_opt_header_size;
+ u8 mtr_opt_footer_size;
+ u8 mcr_opt_header_size;
+ u8 mcr_opt_footer_size;
+ __le16 msg_rings_ctrl_flags;
+ __le16 prph_info_msi_vec;
+ __le64 prph_scratch_base_addr;
+ __le32 prph_scratch_size;
+ __le32 reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_gen3_h__ */
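All of these structures are consumed directly by the device, so their sizes are fixed by the field lists above: version is 8 bytes, control 8, ror_cfg 16, hwm_cfg 16, rbd_cfg 12, and ctrl_cfg therefore 60. A sketch of making that explicit with compile-time checks (illustrative, not part of the patch):

#include <linux/build_bug.h>

static inline void iwl_ctxt_info_gen3_layout_checks(void)
{
	/* Derived by summing the __le16/__le32/__le64 fields declared
	 * above; __packed guarantees there is no padding. */
	BUILD_BUG_ON(sizeof(struct iwl_prph_scratch_version) != 8);
	BUILD_BUG_ON(sizeof(struct iwl_prph_scratch_control) != 8);
	BUILD_BUG_ON(sizeof(struct iwl_prph_scratch_ror_cfg) != 16);
	BUILD_BUG_ON(sizeof(struct iwl_prph_scratch_hwm_cfg) != 16);
	BUILD_BUG_ON(sizeof(struct iwl_prph_scratch_rbd_cfg) != 12);
	BUILD_BUG_ON(sizeof(struct iwl_prph_scratch_ctrl_cfg) != 60);
}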
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index b870c0986744..4b6fdf3b15fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,5 +201,8 @@ struct iwl_context_info {
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram);
#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index ba971d3946e2..9019de99f077 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -339,6 +339,9 @@ enum {
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
+/* HW_RF CHIP STEP */
+#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
+
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -592,6 +595,8 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c59ce4f8a5ed..c0631255aee7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
return 0;
}
-static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
- const u32 len)
-{
- struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
- struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
-
- capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
- capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
- capa->max_ap_cache_per_scan =
- le32_to_cpu(fw_capa->max_ap_cache_per_scan);
- capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
- capa->max_scan_reporting_threshold =
- le32_to_cpu(fw_capa->max_scan_reporting_threshold);
- capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
- capa->max_significant_change_aps =
- le32_to_cpu(fw_capa->max_significant_change_aps);
- capa->max_bssid_history_entries =
- le32_to_cpu(fw_capa->max_bssid_history_entries);
- capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
- capa->max_number_epno_networks =
- le32_to_cpu(fw_capa->max_number_epno_networks);
- capa->max_number_epno_networks_by_ssid =
- le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
- capa->max_number_of_white_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
- capa->max_number_of_black_listed_ssid =
- le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
-}
-
/*
* Gets uCode section from tlv.
*/
@@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
u32 build, paging_mem_size;
int num_of_cpus;
bool usniffer_req = false;
- bool gscan_capa = false;
if (len < sizeof(*ucode)) {
IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
break;
}
+ case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+ if (tlv_len != sizeof(u32)) {
+ IWL_ERR(drv,
+ "dbg lst mask size incorrect, skip\n");
+ break;
+ }
+
+ drv->fw.dbg_dump_mask =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ }
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
*usniffer_images = true;
iwl_store_ucode_sec(pieces, tlv_data,
@@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
paging_mem_size;
break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
- /*
- * Don't return an error in case of a shorter tlv_len
- * to enable loading of FW that has an old format
- * of GSCAN capabilities TLV.
- */
- if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
- break;
-
- iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
- gscan_capa = true;
+ /* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
@@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
- /*
- * If ucode advertises that it supports GSCAN but GSCAN
- * capabilities TLV is not present, or if it has an old format,
- * warn and continue without GSCAN.
- */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
- !gscan_capa) {
- IWL_DEBUG_INFO(drv,
- "GSCAN is supported but capabilities TLV is unavailable\n");
- __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
- capa->_capa);
- }
-
return 0;
invalid_tlv_len:
@@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+ /* dump all fw memory areas by default */
+ fw->dbg_dump_mask = 0xffffffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@@ -1787,7 +1748,8 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
- "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)");
+ "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+ "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
@@ -1856,3 +1818,7 @@ module_param_named(remove_when_gone,
0444);
MODULE_PARM_DESC(remove_when_gone,
"Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
+
+module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
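Two user-visible tunables come out of this file: the new disable_11ax (default false) and amsdu_size, which gains value 4 for the 22560 2K receive-buffer size. Together with the dbg_dump_mask default of 0xffffffff set earlier in this file, a manual load selecting the new options would look like this (illustrative usage of the parameters as named above):

	modprobe iwlwifi disable_11ax=1 amsdu_size=4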
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 777f5df8a0c6..a4c96215933b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
if ((cfg->mq_rx_supported &&
- iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
+ iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 11789ffb6512..df0e9ffff706 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* RXF to DRAM.
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
-#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS 0xA09808
+#define RFH_GEN_STATUS_GEN3 0xA07824
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
-#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG 0xA09820
+#define RFH_RXF_DMA_CFG_GEN3 0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -643,10 +645,13 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
+ TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
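The "cb size is the exponent - 3" encoding is easiest to check against the two queue depths defined here: ilog2(256) - 3 = 5 and ilog2(65536) - 3 = 13. A compile-time sketch (illustrative, not part of the patch):

static inline void iwl_tfd_cb_size_examples(void)
{
	BUILD_BUG_ON(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX) != 5);
	BUILD_BUG_ON(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX_GEN3) != 13);
}

Note the cost of the deeper gen3 queues: TFD_QUEUE_BC_SIZE_GEN3 is 65536 + 64 = 65600 entries, so the gen3 byte-count table declared further down in this patch runs to 65,600 __le16 entries (131,200 bytes).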
@@ -753,7 +758,7 @@ struct iwl_tfh_tfd {
* For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
- * For 22000 and on:
+ * For 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
@@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
+/**
+ * struct iwl_gen3_bc_tbl - scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
+ */
+struct iwl_gen3_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
+
#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index a7dd8a8cddf9..97072cf75bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -17,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -31,6 +30,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,8 @@ enum iwl_amsdu_size {
IWL_AMSDU_4K = 1,
IWL_AMSDU_8K = 2,
IWL_AMSDU_12K = 3,
+ /* Add 2K at the end to avoid breaking current API */
+ IWL_AMSDU_2K = 4,
};
enum iwl_uapsd_disable {
@@ -144,6 +146,10 @@ struct iwl_mod_params {
bool lar_disable;
bool fw_monitor;
bool disable_11ac;
+ /**
+ * @disable_11ax: disable HE capabilities, default = false
+ */
+ bool disable_11ax;
bool remove_when_gone;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b815ba38dbdb..b4c3a957c102 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
+ case IWL_AMSDU_2K:
+ if (cfg->mq_rx_supported)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ else
+ WARN(1, "RB size of 2K is not supported by this device\n");
+ break;
case IWL_AMSDU_4K:
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
@@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
+static struct ieee80211_sband_iftype_data iwl_he_capa = {
+ .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+ IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+ .phy_cap_info[3] =
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+ .phy_cap_info[4] =
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+ .phy_cap_info[5] =
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
+ .phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+ .phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_7,
+ .phy_cap_info[8] =
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
+ },
+ /*
+ * Set default Tx/Rx HE MCS NSS Support field. Indicate support
+ * for up to 2 spatial streams and all MCS, without any special
+ * cases
+ */
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ /*
+ * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
+ * to 7
+ */
+ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
+ },
+};
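The 0xfffa values above pack eight 2-bit per-stream capabilities; with the ieee80211.h constants (IEEE80211_HE_MCS_SUPPORT_0_11 == 2, IEEE80211_HE_MCS_NOT_SUPPORTED == 3), 0xfffa reads as streams 1-2 supporting MCS 0-11 and streams 3-8 disabled. A sketch that reconstructs the constant (illustrative, not part of the patch):

static u16 iwl_he_mcs_map_2ss(void)
{
	u16 map = 0;
	int nss;

	/* 2 bits per stream: 0b10 (MCS 0-11) for the first two
	 * streams, 0b11 (not supported) for the remaining six. */
	for (nss = 0; nss < 8; nss++)
		map |= (nss < 2 ? IEEE80211_HE_MCS_SUPPORT_0_11 :
				  IEEE80211_HE_MCS_NOT_SUPPORTED) << (nss * 2);

	return map;	/* 0xfffa */
}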
+
+static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+ u8 tx_chains, u8 rx_chains)
+{
+ if (sband->band == NL80211_BAND_2GHZ ||
+ sband->band == NL80211_BAND_5GHZ)
+ sband->iftype_data = &iwl_he_capa;
+ else
+ return;
+
+ sband->n_iftype_data = 1;
+
+ /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
+ if ((tx_chains & rx_chains) != ANT_AB) {
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
+ }
+}
+
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_ch_flags, u8 tx_chains,
@@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
@@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
@@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
nvm->sku_cap_11n_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+ nvm->sku_cap_11ax_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
nvm->sku_cap_band_24ghz_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
nvm->sku_cap_band_52ghz_enable =
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1b9c627ee34d..279dd7b7a3fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -350,6 +350,8 @@ static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
switch (rb_size) {
+ case IWL_AMSDU_2K:
+ return get_order(2 * 1024);
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
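One subtlety of the new IWL_AMSDU_2K case: get_order() returns a page allocation order, so on 4 KiB pages a 2K RB costs the same host memory as a 4K RB; the saving is on the device/DMA side, not in the allocator. Worked values, assuming 4 KiB pages:

	/*
	 *   get_order(2 * 1024)  == 0   (still one full page)
	 *   get_order(4 * 1024)  == 0
	 *   get_order(8 * 1024)  == 1
	 *   get_order(12 * 1024) == 2   (rounded up to 16K)
	 */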
@@ -438,6 +440,20 @@ struct iwl_trans_txq_scd_cfg {
};
/**
+ * struct iwl_trans_rxq_dma_data - RX queue DMA data
+ * @fr_bd_cb: DMA address of free BD cyclic buffer
+ * @fr_bd_wid: Initial write index of the free BD cyclic buffer
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @ur_bd_cb: DMA address of used BD cyclic buffer
+ */
+struct iwl_trans_rxq_dma_data {
+ u64 fr_bd_cb;
+ u32 fr_bd_wid;
+ u64 urbd_stts_wrptr;
+ u64 ur_bd_cb;
+};
+
+/**
* struct iwl_trans_ops - transport specific operations
*
* All the handlers MUST be implemented
@@ -557,6 +573,8 @@ struct iwl_trans_ops {
int cmd_id, int size,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
+ int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
@@ -753,6 +771,7 @@ struct iwl_trans {
const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+ u32 dbg_dump_mask;
u8 dbg_dest_reg_num;
enum iwl_plat_pm_mode system_pm_mode;
@@ -945,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
cfg, queue_wdg_timeout);
}
+static inline int
+iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
+ return -ENOTSUPP;
+
+ return trans->ops->rxq_dma_data(trans, queue, data);
+}
+
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3fcf489f3120..79bdae994822 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
+ /*
+ * TODO: this is needed because the firmware is not stopping
+ * the recording automatically before entering D3. This can
+ * be removed once the FW starts doing that.
+ */
+ iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1c4178f20441..05b77419953c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc;
int bin_len = count / 2;
int ret = -EINVAL;
+ size_t mpdu_cmd_hdr_size =
+ (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
@@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
goto out;
/* avoid invalid memory access */
- if (bin_len < sizeof(*pkt) + sizeof(*desc))
+ if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
goto out;
/* check this is RX packet */
@@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
/* check the length in metadata matches actual received length */
desc = (void *)pkt->data;
if (le16_to_cpu(desc->mpdu_len) !=
- (bin_len - sizeof(*desc) - sizeof(*pkt)))
+ (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
goto out;
local_bh_disable();
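The checks above boil down to a single size identity, now parameterized by device family because the 22560 RX descriptor is larger than IWL_RX_DESC_SIZE_V1. Stated as a sketch:

	/*
	 * An injected buffer is accepted only when
	 *
	 *   bin_len == sizeof(*pkt) + mpdu_cmd_hdr_size +
	 *              le16_to_cpu(desc->mpdu_len)
	 *
	 * with mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc) on
	 * family 22560 and later, IWL_RX_DESC_SIZE_V1 otherwise.
	 */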
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 866c91c923be..6bb1a99a197a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+ int i, num_queues, size;
+ struct iwl_rfh_queue_config *cmd;
+
+ /* Do not configure default queue, it is configured via context info */
+ num_queues = mvm->trans->num_rx_queues - 1;
+
+ size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->num_queues = num_queues;
+
+ for (i = 0; i < num_queues; i++) {
+ struct iwl_trans_rxq_dma_data data;
+
+ cmd->data[i].q_num = i + 1;
+ iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+ cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+ cmd->data[i].urbd_stts_wrptr =
+ cpu_to_le64(data.urbd_stts_wrptr);
+ cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+ cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+ }
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ RFH_QUEUE_CONFIG_CMD),
+ 0, size, cmd);
+}
+
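As written, the kzalloc()ed command buffer is not released once iwl_mvm_send_cmd_pdu() returns, and the per-queue iwl_trans_get_rxq_dma_data() status is ignored. A sketch of a leak-free tail for the function (the ret local is an addition, not part of the patch; freeing after a synchronous send is safe because the payload is consumed before the call returns):

	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(DATA_PATH_GROUP,
					   RFH_QUEUE_CONFIG_CMD),
				   0, size, cmd);
	kfree(cmd);
	return ret;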
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -301,7 +336,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Init RSS configuration */
- /* TODO - remove 22000 disablement when we have RXQ config API */
- if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ ret = iwl_configure_rxq(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8ba16fc24e3a..b3fd20502abb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 26021bc55e98..b15b0d84bb7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -36,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
- u8 buf_size = params->buf_size;
+ u16 buf_size = params->buf_size;
bool amsdu = params->amsdu;
u16 timeout = params->timeout;
@@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
iwl_mvm_mu_mimo_iface_iterator, notif);
}
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+ /*
+ * If bit_num > 5, we have to combine bits with next byte.
+ * Calculate how many bits we need to take from current byte (called
+ * here "residue_bits"), and add them to bits from next byte.
+ */
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
+
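A worked instance of the cross-byte branch, taking IEEE80211_PPE_THRES_INFO_PPET_SIZE as 3 (its ieee80211.h value): for ppe_pos_bit == 7, byte_num == 0 and bit_num == 7, so the field is stitched from both bytes:

	/*
	 *   residue_bits = 8 - 7 = 1
	 *   res = ((ppe[1] & 0x3) << 1) | ((ppe[0] >> 7) & 0x1)
	 *
	 * i.e. the field's low bit is ppe[0] bit 7 and its two high
	 * bits are ppe[1] bits 0-1.
	 */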
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 sta_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ .sta_id = sta_id,
+ .tid_limit = IWL_MAX_TID_COUNT,
+ .bss_color = vif->bss_conf.bss_color,
+ .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+ .frame_time_rts_th =
+ cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+ };
+ struct ieee80211_sta *sta;
+ u32 flags;
+ int i;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+ if (IS_ERR(sta)) {
+ rcu_read_unlock();
+ WARN(1, "Can't find STA to configure HE\n");
+ return;
+ }
+
+ if (!sta->he_cap.has_he) {
+ rcu_read_unlock();
+ return;
+ }
+
+ flags = 0;
+
+ /* HTC flags */
+ if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_HTC_HE)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+ if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+ else if (link_adap == 3)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+ }
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
+ sta_ctxt_cmd.htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+ if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+ /* If PPE Thresholds exist, parse them into a FW-familiar format */
+ if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 ru_index_bitmap =
+ (sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+ u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 ppe_pos_bit = 7; /* Starting after PPE header */
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 bw;
+
+ for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
+ ru_index_tmp >>= 1;
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
+ iwl_mvm_he_get_ppe_val(ppe,
+ ppe_pos_bit);
+ ppe_pos_bit +=
+ IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ }
+ }
+
+ flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+ rcu_read_unlock();
+
+ /* Assume MU EDCA is enabled; clear the flag if any AC lacks MU EDCA params */
+ flags |= STA_CTXT_HE_MU_EDCA_CW;
+ for (i = 0; i < AC_NUM; i++) {
+ struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &mvmvif->queue_params[i].mu_edca_param_rec;
+
+ if (!mvmvif->queue_params[i].mu_edca) {
+ flags &= ~STA_CTXT_HE_MU_EDCA_CW;
+ break;
+ }
+
+ sta_ctxt_cmd.trig_based_txf[i].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ sta_ctxt_cmd.trig_based_txf[i].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ sta_ctxt_cmd.trig_based_txf[i].aifsn =
+ cpu_to_le16(mu_edca->aifsn);
+ sta_ctxt_cmd.trig_based_txf[i].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
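For reference, ecw_min_max packs ECWmin in the low nibble and ECWmax in the high nibble, and the actual contention window is CW = 2^ECW - 1. A worked standalone example of the unpacking done above (example value arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint8_t ecw_min_max = 0xa4;	/* example: ECWmin = 4, ECWmax = 10 */
    	uint8_t ecwmin = ecw_min_max & 0xf;
    	uint8_t ecwmax = (ecw_min_max & 0xf0) >> 4;

    	/* CW = 2^ECW - 1 per the usual EDCA encoding */
    	printf("CWmin=%u CWmax=%u\n",
    	       (1u << ecwmin) - 1, (1u << ecwmax) - 1);
    	return 0;	/* prints CWmin=15 CWmax=1023 */
    }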
+
+ if (vif->bss_conf.multi_sta_back_32bit)
+ flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+ if (vif->bss_conf.ack_enabled)
+ flags |= STA_CTXT_HE_ACK_ENABLED;
+
+ if (vif->bss_conf.uora_exists) {
+ flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
+ sta_ctxt_cmd.rand_alloc_ecwmin =
+ vif->bss_conf.uora_ocw_range & 0x7;
+ sta_ctxt_cmd.rand_alloc_ecwmax =
+ (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+ }
+
+ /* TODO: support Multi BSSID IE */
+
+ sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
+ DATA_PATH_GROUP, 0),
+ 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+ IWL_ERR(mvm, "Failed to configure FW to work in HE!\n");
+}
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* beacon interval, which was not known when the station interface was
* added.
*/
- if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ if (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ }
/*
* If we're not associated yet, take the (new) BSSID before associating
@@ -4364,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
- /* TODO - remove this when we have RXQ config API */
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
- qmask = BIT(0);
- if (notif->sync)
- atomic_set(&mvm->queue_sync_counter, 1);
- }
-
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6a4ba160c59e..b3987a0a7018 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -654,7 +654,7 @@ struct iwl_mvm_tcm {
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
- u8 buf_size;
+ u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ff1e518096c5..0e26619fb330 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -448,6 +448,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(STA_HE_CTXT_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -620,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
- trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
+ trans->rx_mpdu_cmd_hdr_size =
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
} else {
op_mode->ops = &iwl_mvm_ops;
trans->rx_mpdu_cmd_hdr_size =
@@ -703,11 +709,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
/* the hardware splits the A-MSDU */
- if (mvm->cfg->mq_rx_supported)
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ trans_cfg.rx_buf_size = IWL_AMSDU_2K;
+ /* TODO: remove when balanced power mode is fw supported */
+ iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+ } else if (mvm->cfg->mq_rx_supported) {
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ }
trans->wide_cmd_header = true;
- trans_cfg.bc_table_dword = true;
+ trans_cfg.bc_table_dword =
+ mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -738,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+ trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -1003,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
schedule_work(&mvm->async_handlers_wk);
- return;
+ break;
}
-
- iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
}
static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index b8b2b819e8e7..8169d1450b3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
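The mapping turns a "highest supported MCS" code into a contiguous bitmap of enabled rates: BIT(n + 1) - 1 sets bits 0..n. The same idiom sketched outside the driver (helper name hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Bitmap with bits 0..highest set: every MCS up to and including it. */
    static uint16_t mcs_bitmap(unsigned int highest)
    {
    	return (1u << (highest + 1)) - 1;
    }

    int main(void)
    {
    	printf("0..7  -> 0x%03x\n", mcs_bitmap(7));	/* 0x0ff */
    	printf("0..9  -> 0x%03x\n", mcs_bitmap(9));	/* 0x3ff */
    	printf("0..11 -> 0x%03x\n", mcs_bitmap(11));	/* 0xfff */
    	return 0;
    }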
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
+ const struct ieee80211_sta_he_cap *he_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ int i;
+
+ for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+
+ cmd->ht_rates[i][0] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+ cmd->ht_rates[i][1] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+ }
+}
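rx_mcs_80/rx_mcs_160 hold one 2-bit IEEE80211_HE_MCS_SUPPORT_* code per spatial stream, which is why the loop above shifts by 2 * i and masks with 0x3. A standalone decode of such a map (example value arbitrary; code 3 means "not supported"):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* NSS1 supports MCS 0..11, NSS2 MCS 0..9, the rest nothing */
    	uint16_t mcs_map = 0xfff6;
    	int nss;

    	for (nss = 0; nss < 8; nss++) {
    		uint16_t code = (mcs_map >> (2 * nss)) & 0x3;

    		if (code == 3)
    			printf("NSS%d: not supported\n", nss + 1);
    		else
    			printf("NSS%d: MCS 0..%d\n", nss + 1, 7 + 2 * code);
    	}
    	return 0;
    }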
+
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
@@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
unsigned long supp; /* must be unsigned long for for_each_set_bit */
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
/* non HT rates */
supp = 0;
@@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->non_ht_rates = cpu_to_le16(supp);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
- if (vht_cap && vht_cap->vht_supported) {
+ /* HT/VHT rates */
+ if (he_cap && he_cap->has_he) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+ } else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap && ht_cap->ht_supported) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 642da10b0b7f..30cfd7d50bc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx += 1;
if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK ||
+ rate_n_flags & RATE_MCS_HE_MSK) {
idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
idx += IWL_RATE_MCS_0_INDEX;
@@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx++;
if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
return idx;
+ if ((rate_n_flags & RATE_MCS_HE_MSK) &&
+ (idx <= IWL_LAST_HE_RATE))
+ return idx;
} else {
/* legacy rate format, search for match in table */
@@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
[LQ_HT_MIMO2] = "HT MIMO",
[LQ_VHT_SISO] = "VHT SISO",
[LQ_VHT_MIMO2] = "VHT MIMO",
+ [LQ_HE_SISO] = "HE SISO",
+ [LQ_HE_MIMO2] = "HE MIMO",
};
if (type < LQ_NONE || type >= LQ_MAX)
@@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
/* Legacy */
if (!(ucode_rate & RATE_MCS_HT_MSK) &&
- !(ucode_rate & RATE_MCS_VHT_MSK)) {
+ !(ucode_rate & RATE_MCS_VHT_MSK) &&
+ !(ucode_rate & RATE_MCS_HE_MSK)) {
if (num_of_ant == 1) {
if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
@@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
return 0;
}
- /* HT or VHT */
+ /* HT, VHT or HE */
if (ucode_rate & RATE_MCS_SGI_MSK)
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
@@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
} else {
WARN_ON_ONCE(1);
}
+ } else if (ucode_rate & RATE_MCS_HE_MSK) {
+ nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+
+ if (nss == 1) {
+ rate->type = LQ_HE_SISO;
+ WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+ "stbc %d bfer %d", rate->stbc, rate->bfer);
+ } else if (nss == 2) {
+ rate->type = LQ_HE_MIMO2;
+ WARN_ON_ONCE(num_of_ant != 2);
+ } else {
+ WARN_ON_ONCE(1);
+ }
}
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
- !is_vht(rate));
+ !is_he(rate) && !is_vht(rate));
return 0;
}
@@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
if (!(rate & RATE_MCS_HT_MSK) &&
- !(rate & RATE_MCS_VHT_MSK)) {
+ !(rate & RATE_MCS_VHT_MSK) &&
+ !(rate & RATE_MCS_HE_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
@@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
mcs = rate & RATE_HT_MCS_INDEX_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK)
>> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_HE_MSK) {
+ type = "HE";
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+ >> RATE_VHT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
@@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
[IWL_RATE_MCS_7_INDEX] = "MCS7",
[IWL_RATE_MCS_8_INDEX] = "MCS8",
[IWL_RATE_MCS_9_INDEX] = "MCS9",
+ [IWL_RATE_MCS_10_INDEX] = "MCS10",
+ [IWL_RATE_MCS_11_INDEX] = "MCS11",
};
char *buff, *pos, *endpos;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index cffb8c852934..d2cf484e2b73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -144,8 +144,13 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
+/*
+ * FIXME - various places in firmware API still use u8,
+ * e.g. LQ command and SCD config command.
+ * This should be 256 instead.
+ */
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -162,6 +167,8 @@ enum iwl_table_type {
LQ_HT_MIMO2,
LQ_VHT_SISO, /* VHT types */
LQ_VHT_MIMO2,
+ LQ_HE_SISO, /* HE types */
+ LQ_HE_MIMO2,
LQ_MAX,
};
@@ -183,11 +190,16 @@ struct rs_rate {
#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
-#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
-#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
+#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
+#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
+ is_type_he_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
+ is_type_vht_mimo2(type) || is_type_he_mimo2(type))
#define is_type_mimo(type) (is_type_mimo2(type))
#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
@@ -201,6 +213,7 @@ struct rs_rate {
#define is_mimo(rate) is_type_mimo((rate)->type)
#define is_ht(rate) is_type_ht((rate)->type)
#define is_vht(rate) is_type_vht((rate)->type)
+#define is_he(rate) is_type_he((rate)->type)
#define is_a_band(rate) is_type_a_band((rate)->type)
#define is_g_band(rate) is_type_g_band((rate)->type)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 129c4c09648d..b53148f972a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -196,22 +198,31 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct sk_buff *skb, int queue,
struct ieee80211_sta *sta)
{
- if (iwl_mvm_check_pn(mvm, skb, queue, sta))
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
kfree_skb(skb);
- else
+ } else {
+ unsigned int radiotap_len = 0;
+
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he);
+ if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
+ __skb_push(skb, radiotap_len);
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+ }
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
- struct iwl_rx_mpdu_desc *desc,
- struct ieee80211_rx_status *rx_status)
+ struct ieee80211_rx_status *rx_status,
+ u32 rate_n_flags, int energy_a,
+ int energy_b)
{
- int energy_a, energy_b, max_energy;
- u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
+ int max_energy;
+ u32 rate_flags = rate_n_flags;
- energy_a = desc->energy_a;
energy_a = energy_a ? -energy_a : S8_MIN;
- energy_b = desc->energy_b;
energy_b = energy_b ? -energy_b : S8_MIN;
max_energy = max(energy_a, energy_b);
@@ -356,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
tid = IWL_MAX_TID_COUNT;
/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
- sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
dup_data->last_seq[tid] == hdr->seq_ctrl &&
@@ -850,17 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
- struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
+ struct ieee80211_hdr *hdr;
u32 len = le16_to_cpu(desc->mpdu_len);
- u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+ u32 rate_n_flags, gp2_on_air_rise;
u16 phy_info = le16_to_cpu(desc->phy_info);
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 crypt_len = 0;
+ u8 crypt_len = 0, channel, energy_a, energy_b;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 he_type = 0xffffffff;
+ /* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+ u64 he_phy_data = HE_PHY_DATA_INVAL;
+ size_t desc_size;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ channel = desc->v3.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ energy_a = desc->v3.energy_a;
+ energy_b = desc->v3.energy_b;
+ desc_size = sizeof(*desc);
+ } else {
+ rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ channel = desc->v1.channel;
+ gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ energy_a = desc->v1.energy_a;
+ energy_b = desc->v1.energy_b;
+ desc_size = IWL_RX_DESC_SIZE_V1;
+ }
+
+ hdr = (void *)(pkt->data + desc_size);
/* Don't use dev_alloc_skb(); we'll have enough headroom once the
 * ieee80211_hdr is pulled.
 */
@@ -882,6 +918,51 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
+ if (rate_n_flags & RATE_MCS_HE_MSK) {
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+ };
+ unsigned int radiotap_len = 0;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ radiotap_len += sizeof(known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_type == RATE_MCS_HE_TYPE_MU) {
+ he_mu = skb_put_data(skb, &mu_known,
+ sizeof(mu_known));
+ radiotap_len += sizeof(mu_known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+ }
+
+ /* temporarily hide the radiotap data */
+ __skb_pull(skb, radiotap_len);
+ }
+
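The radiotap structs are appended to the freshly allocated skb with skb_put_data() and immediately hidden with __skb_pull(), so the rest of the RX path parses the MPDU as usual; iwl_mvm_pass_packet_to_mac80211() (earlier hunk) re-exposes them with a matching __skb_push() before handing the frame to mac80211. A hedged sketch of that reserve/hide/re-expose flow -- not a drop-in, and it assumes the skb is empty when the template is appended so the data lands ahead of the 802.11 header:

    /* sketch only */
    static void radiotap_flow(struct sk_buff *skb,
    			  const struct ieee80211_radiotap_he *tmpl)
    {
    	struct ieee80211_radiotap_he *he;

    	he = skb_put_data(skb, tmpl, sizeof(*he)); /* append template */
    	/* ... fill in *he from PHY data ... */
    	__skb_pull(skb, sizeof(*he));	/* hide while the MPDU is parsed */

    	/* ... header copy, crypto/dup checks on the bare MPDU ... */

    	__skb_push(skb, sizeof(*he));	/* re-expose for mac80211 */
    }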
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
@@ -904,20 +985,80 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
- rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+ u64 tsf_on_air_rise;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+ else
+ tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+ rx_status->mactime = tsf_on_air_rise;
/* TSF as indicated by the firmware is at INA time */
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ } else if (he_type == RATE_MCS_HE_TYPE_SU) {
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+ he_phy_data))
+ he->data3 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ mvm->ampdu_ref++;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+ } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
}
- rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
- rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
- NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
+ rx_status->device_timestamp = gp2_on_air_rise;
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
- iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+ energy_b);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -925,6 +1066,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
+
+ if (he_phy_data != HE_PHY_DATA_INVAL &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |=
+ RX_FLAG_AMPDU_EOF_BIT;
+ }
}
}
@@ -1033,7 +1183,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- /* Set up the HT phy flags */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
break;
@@ -1048,6 +1197,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ if (rate_n_flags & RATE_MCS_HE_MSK &&
+ phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
+ he_type == RATE_MCS_HE_TYPE_MU) {
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the he_mu pointer,
+ * i.e. we don't have the phy data (due to the bits
+ * being used for TSF). This shouldn't happen though
+ * as management frames where we need the TSF/timers
+ * are not transmitted in HE-MU, I think.
+ */
+ u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |=
+ le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+ if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+ } else if (he) {
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+ }
+
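The switch above collapses the 7-bit RU allocation index into an RU size plus an offset among RUs of that size; the boundaries (0-36 -> 26-tone, 37-52 -> 52-tone, 53-60 -> 106-tone, 61-64 -> 242-tone, 65-66 -> 484-tone, 67 -> 996-tone, 68 -> 2x996-tone) follow the 802.11ax RU numbering. A table-driven equivalent, standalone for illustration:

    #include <stdio.h>

    struct ru_range { int first, last, tones; };

    static const struct ru_range ranges[] = {
    	{  0, 36,   26 }, { 37, 52,  52 }, { 53, 60, 106 },
    	{ 61, 64,  242 }, { 65, 66, 484 }, { 67, 67, 996 },
    	{ 68, 68, 1992 },	/* 2x996 */
    };

    int main(void)
    {
    	int ru = 55;	/* example index */
    	size_t i;

    	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
    		if (ru >= ranges[i].first && ru <= ranges[i].last) {
    			printf("RU %d: %d-tone, offset %d\n", ru,
    			       ranges[i].tones, ru - ranges[i].first);
    			break;
    		}
    	}
    	return 0;	/* prints: RU 55: 106-tone, offset 2 */
    }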
if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1072,6 +1285,119 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else if (he) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 1:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case 2:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case 3:
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ switch (he_type) {
+ case RATE_MCS_HE_TYPE_SU: {
+ u16 val;
+
+ /* LTF syms correspond to streams */
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ switch (rx_status->nss) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 3:
+ case 4:
+ val = 2;
+ break;
+ case 5:
+ case 6:
+ val = 3;
+ break;
+ case 7:
+ case 8:
+ val = 4;
+ break;
+ default:
+ WARN_ONCE(1, "invalid nss: %d\n",
+ rx_status->nss);
+ val = 0;
+ }
+ he->data5 |=
+ le16_encode_bits(val,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ }
+ break;
+ case RATE_MCS_HE_TYPE_MU: {
+ u16 val;
+ u64 he_phy_data;
+
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_phy_data == HE_PHY_DATA_INVAL)
+ break;
+
+ val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+ he_phy_data);
+
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data5 |=
+ cpu_to_le16(FIELD_PREP(
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
+ val));
+ }
+ break;
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ case RATE_MCS_HE_TYPE_TRIG:
+ /* not supported yet */
+ break;
+ }
} else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9263b9aa8b72..18db1ed92d9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
- u16 ssn, u8 buf_size)
+ u16 ssn, u16 buf_size)
{
int i;
@@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (start) {
cmd.add_immediate_ba_tid = (u8) tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
- cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
} else {
cmd.remove_immediate_ba_tid = (u8) tid;
}
@@ -2559,7 +2559,7 @@ out:
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1c43ea8dd8cc..0fc211108149 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -412,7 +412,7 @@ struct iwl_mvm_sta {
u32 tfd_queue_msk;
u32 mac_id_n_color;
u16 tid_disable_agg;
- u8 max_agg_bufsize;
+ u16 max_agg_bufsize;
enum iwl_sta_type sta_type;
enum ieee80211_sta_state sta_state;
bool bt_reduced_txpower;
@@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index cf2591f2ac23..ff193dca2020 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Make sure we zero enough of dev_cmd */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = 0;
+ u32 rate_n_flags = 0;
+ u16 flags = 0;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
- cmd->offload_assist |= cpu_to_le16(offload_assist);
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
- /* Total # bytes to be transmitted */
- cmd->len = cpu_to_le16((u16)skb->len);
+ /* For data packets rate info comes from the fw */
+ if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+ }
- /* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ if (mvm->trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
- if (!info->control.hw_key)
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
+ cmd->offload_assist |= cpu_to_le32(offload_assist);
- /* For data packets rate info comes from the fw */
- if (ieee80211_is_data(hdr->frame_control) && sta)
- goto out;
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
- cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
- cmd->rate_n_flags =
- cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ } else {
+ struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+
+ cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(cmd->hdr, hdr, hdrlen);
+
+ cmd->flags = cpu_to_le32(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ }
goto out;
}
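Both layouts carry the same logical fields, so the rewrite computes offload_assist, flags and rate_n_flags once and only the final stores differ; note the widths swap between generations (gen3 has a 32-bit offload_assist and 16-bit flags, gen2 the reverse), which is why the cpu_to_le16/cpu_to_le32 calls differ per branch. A compact sketch of the dispatch-by-layout idiom -- struct and field names here are illustrative, not the driver's:

    /* kernel-context sketch; types from <linux/types.h> */
    struct cmd_v2 { __le16 offload_assist; __le32 flags; __le32 rate; };
    struct cmd_v3 { __le32 offload_assist; __le16 flags; __le32 rate; };

    static void fill_cmd(void *payload, bool v3,
    		     u16 offload, u16 flags, u32 rate)
    {
    	if (v3) {
    		struct cmd_v3 *c = payload;

    		c->offload_assist = cpu_to_le32(offload);
    		c->flags = cpu_to_le16(flags);
    		c->rate = cpu_to_le32(rate);
    	} else {
    		struct cmd_v2 *c = payload;

    		c->offload_assist = cpu_to_le16(offload);
    		c->flags = cpu_to_le32(flags);
    		c->rate = cpu_to_le32(rate);
    	}
    }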
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
new file mode 100644
index 000000000000..2146fda8da2f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ const struct fw_img *fw)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_prph_scratch *prph_scratch;
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+ struct iwl_prph_info *prph_info;
+ void *iml_img;
+ u32 control_flags = 0;
+ int ret;
+
+ /* Allocate prph scratch */
+ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+ &trans_pcie->prph_scratch_dma_addr,
+ GFP_KERNEL);
+ if (!prph_scratch)
+ return -ENOMEM;
+
+ prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->version.version = 0;
+ prph_sc_ctrl->version.mac_id =
+ cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+
+ control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
+ IWL_PRPH_SCRATCH_MTR_MODE |
+ (IWL_PRPH_MTR_FORMAT_256B &
+ IWL_PRPH_SCRATCH_MTR_FORMAT) |
+ IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
+ IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+
+ /* initialize RX default queue */
+ prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+ cpu_to_le64(trans_pcie->rxq->bd_dma);
+
+ /* Configure debug, for integration */
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ prph_sc_ctrl->hwm_cfg.hwm_base_addr =
+ cpu_to_le64(trans_pcie->fw_mon_phys);
+ prph_sc_ctrl->hwm_cfg.hwm_size =
+ cpu_to_le32(trans_pcie->fw_mon_size);
+
+ /* allocate ucode sections in dram and set addresses */
+ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+ if (ret) {
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+ }
+
+ /* Allocate prph information
+ * currently we don't assign anything to the prph info, but it
+ * will be assigned later */
+ prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+ &trans_pcie->prph_info_dma_addr,
+ GFP_KERNEL);
+ if (!prph_info)
+ return -ENOMEM;
+
+ /* Allocate context info */
+ ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
+ sizeof(*ctxt_info_gen3),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info_gen3)
+ return -ENOMEM;
+
+ ctxt_info_gen3->prph_info_base_addr =
+ cpu_to_le64(trans_pcie->prph_info_dma_addr);
+ ctxt_info_gen3->prph_scratch_base_addr =
+ cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+ ctxt_info_gen3->prph_scratch_size =
+ cpu_to_le32(sizeof(*prph_scratch));
+ ctxt_info_gen3->cr_head_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+ ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+ ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
+ ctxt_info_gen3->cr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
+ ctxt_info_gen3->tr_idx_arr_size =
+ cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+ ctxt_info_gen3->mtr_base_addr =
+ cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ ctxt_info_gen3->mcr_base_addr =
+ cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+ ctxt_info_gen3->mtr_size =
+ cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+ ctxt_info_gen3->mcr_size =
+ cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+
+ trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+ trans_pcie->prph_info = prph_info;
+ trans_pcie->prph_scratch = prph_scratch;
+
+ /* Allocate IML */
+ iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
+ &trans_pcie->iml_dma_addr, GFP_KERNEL);
+ if (!iml_img)
+ return -ENOMEM;
+
+ memcpy(iml_img, trans->iml, trans->iml_len);
+
+ iwl_enable_interrupts(trans);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR,
+ trans_pcie->ctxt_info_dma_addr);
+ iwl_write64(trans, CSR_IML_DATA_ADDR,
+ trans_pcie->iml_dma_addr);
+ iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+
+ return 0;
+}
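One thing worth flagging: as written, the later -ENOMEM returns leave the earlier coherent allocations (prph_scratch, prph_info) live. The usual kernel idiom unwinds with goto labels; a hedged sketch of that pattern -- illustrative only, not what the hunk above does:

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static int init_sketch(struct device *dev)
    {
    	void *a, *b, *c;
    	dma_addr_t da, db, dc;

    	a = dma_alloc_coherent(dev, SZ_4K, &da, GFP_KERNEL);
    	if (!a)
    		return -ENOMEM;
    	b = dma_alloc_coherent(dev, SZ_4K, &db, GFP_KERNEL);
    	if (!b)
    		goto free_a;
    	c = dma_alloc_coherent(dev, SZ_4K, &dc, GFP_KERNEL);
    	if (!c)
    		goto free_b;
    	return 0;

    free_b:
    	dma_free_coherent(dev, SZ_4K, b, db);
    free_a:
    	dma_free_coherent(dev, SZ_4K, a, da);
    	return -ENOMEM;
    }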
+
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans_pcie->ctxt_info_gen3)
+ return;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+ trans_pcie->ctxt_info_gen3,
+ trans_pcie->ctxt_info_dma_addr);
+ trans_pcie->ctxt_info_dma_addr = 0;
+ trans_pcie->ctxt_info_gen3 = NULL;
+
+ iwl_pcie_ctxt_info_free_fw_img(trans);
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+ trans_pcie->prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ trans_pcie->prph_scratch_dma_addr = 0;
+ trans_pcie->prph_scratch = NULL;
+
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
+ trans_pcie->prph_info,
+ trans_pcie->prph_info_dma_addr);
+ trans_pcie->prph_info_dma_addr = 0;
+ trans_pcie->prph_info = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 3fc4343581ee..b2cd7ef5fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,57 +57,6 @@
#include "internal.h"
#include "iwl-prph.h"
-static int iwl_pcie_get_num_sections(const struct fw_img *fw,
- int start)
-{
- int i = 0;
-
- while (start < fw->num_sec &&
- fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
- fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
- start++;
- i++;
- }
-
- return i;
-}
-
-static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
-static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- int i;
-
- if (!dram->fw) {
- WARN_ON(dram->fw_cnt);
- return;
- }
-
- for (i = 0; i < dram->fw_cnt; i++)
- dma_free_coherent(trans->dev, dram->fw[i].size,
- dram->fw[i].block, dram->fw[i].physical);
-
- kfree(dram->fw);
- dram->fw_cnt = 0;
- dram->fw = NULL;
-}
-
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
dram->paging = NULL;
}
-static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
- const struct fw_img *fw,
- struct iwl_context_info *ctxt_info)
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+ const struct fw_img *fw,
+ struct iwl_context_info_dram *ctxt_dram)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
- struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
if (WARN(dram->paging,
@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+ ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 38234bda9017..562cc79288a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
@@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
@@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
@@ -656,6 +666,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -806,19 +828,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
@@ -981,6 +1016,10 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
+ /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return 0;
+
/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
iwl_pcie_conf_msix_hw(trans_pcie);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45ea32796cda..b63d44b7cd7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -3,6 +3,7 @@
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -17,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -45,6 +45,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
+#include "iwl-drv.h"
/* We need 2 entries for the TX command and header, and another one might
* be needed for potential data in the SKB's head. The remaining ones can
@@ -59,6 +60,7 @@
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
+#define FIRST_RX_QUEUE 512
struct iwl_host_cmd;
@@ -71,6 +73,7 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @size: size used from the buffer
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 size;
};
/**
@@ -98,14 +102,121 @@ struct isr_statistics {
u32 unhandled;
};
+#define IWL_CD_STTS_OPTIMIZED_POS 0
+#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS 4
+#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
+
+/**
+ * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ * In sniffer mode, when split is used, set in last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
+ * all CD completion. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+ IWL_CD_STTS_UNUSED,
+ IWL_CD_STTS_UNUSED_2,
+ IWL_CD_STTS_END_TRANSFER,
+ IWL_CD_STTS_OVERFLOW,
+ IWL_CD_STTS_ABORTED,
+ IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+ IWL_CD_STTS_VALID,
+ IWL_CD_STTS_FCS_ERR,
+ IWL_CD_STTS_SEC_KEY_ERR,
+ IWL_CD_STTS_DECRYPTION_ERR,
+ IWL_CD_STTS_DUP,
+ IWL_CD_STTS_ICV_MIC_ERR,
+ IWL_CD_STTS_INTERNAL_SNAP_ERR,
+ IWL_CD_STTS_SEC_PORT_FAIL,
+ IWL_CD_STTS_BA_OLD_SN,
+ IWL_CD_STTS_QOS_NULL,
+ IWL_CD_STTS_MAC_HDR_ERR,
+ IWL_CD_STTS_MAX_RETRANS,
+ IWL_CD_STTS_EX_LIFETIME,
+ IWL_CD_STTS_NOT_USED,
+ IWL_CD_STTS_REPLAY_ERR,
+};
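/*
 * Illustrative sketch, not part of this patch: the IWL_CD_STTS_*
 * position/mask macros above decode the single status byte of a
 * completion descriptor. The helper names below are hypothetical.
 */
static inline u8 iwl_cd_stts_transfer_status(u8 status)
{
	/* e.g. status 0x04 -> (0x04 & 0x0E) >> 1 == IWL_CD_STTS_END_TRANSFER */
	return (status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
	       IWL_CD_STTS_TRANSFER_STATUS_POS;
}

static inline u8 iwl_cd_stts_wifi_status(u8 status)
{
	/* bits 4-7, e.g. IWL_CD_STTS_VALID or IWL_CD_STTS_FCS_ERR */
	return (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
	       IWL_CD_STTS_WIFI_STATUS_POS;
}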
+
+#define IWL_RX_TD_TYPE_MSK 0xff000000
+#define IWL_RX_TD_SIZE_MSK 0x00ffffff
+#define IWL_RX_TD_SIZE_2K BIT(11)
+#define IWL_RX_TD_TYPE 0
+
+/**
+ * struct iwl_rx_transfer_desc - transfer descriptor
+ * @type_n_size: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * and buffer size
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwl_rx_transfer_desc {
+ __le32 type_n_size;
+ __le64 addr;
+ __le16 rbid;
+ __le16 reserved;
+} __packed;
+
+#define IWL_RX_CD_SIZE 0xffffff00
+
+/**
+ * struct iwl_rx_completion_desc - completion descriptor
+ * @type: buffer type (bit 0: external buff valid,
+ * bit 1: optional footer valid, bit 2-7: reserved)
+ * @status: status of the completion
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @reserved2: reserved
+ */
+struct iwl_rx_completion_desc {
+ u8 type;
+ u8 status;
+ __le16 reserved1;
+ __le16 rbid;
+ __le32 size;
+ u8 reserved2[22];
+} __packed;
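/*
 * A quick layout check, illustrative only: the packed structs above work
 * out to 16 bytes for the transfer descriptor (4 + 8 + 2 + 2) and 32
 * bytes for the completion descriptor (1 + 1 + 2 + 2 + 4 + 22). A
 * hypothetical build-time assertion would catch accidental changes:
 */
static inline void iwl_rx_desc_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct iwl_rx_transfer_desc) != 16);
	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
}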
+
/**
* struct iwl_rxq - Rx queue
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
+ * @tr_tail: driver's pointer to the transmission ring tail buffer
+ * @tr_tail_dma: physical address of the buffer for the transmission ring tail
+ * @cr_tail: driver's pointer to the completion ring tail buffer
+ * @cr_tail_dma: physical address of the buffer for the completion ring tail
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -125,8 +236,16 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- __le32 *used_bd;
+ union {
+ void *used_bd;
+ __le32 *bd_32;
+ struct iwl_rx_completion_desc *cd;
+ };
dma_addr_t used_bd_dma;
+ __le16 *tr_tail;
+ dma_addr_t tr_tail_dma;
+ __le16 *cr_tail;
+ dma_addr_t cr_tail_dma;
u32 read;
u32 write;
u32 free_count;
@@ -136,7 +255,7 @@ struct iwl_rxq {
struct list_head rx_free;
struct list_head rx_used;
bool need_update;
- struct iwl_rb_status *rb_stts;
+ void *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
struct napi_struct napi;
@@ -175,18 +294,36 @@ struct iwl_dma_ptr {
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
*/
-static inline int iwl_queue_inc_wrap(int index)
+static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
- return ++index & (TFD_QUEUE_SIZE_MAX - 1);
+ return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @rxq - the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ __le16 *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(*rb_stts);
+ } else {
+ struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+ return READ_ONCE(rb_stts->closed_rb_num);
+ }
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
*/
-static inline int iwl_queue_dec_wrap(int index)
+static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
- return --index & (TFD_QUEUE_SIZE_MAX - 1);
+ return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
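/*
 * Worked example, illustrative only: the wrap helpers above depend on
 * max_tfd_queue_size being a power of two. With a queue size of 256,
 * incrementing index 255 yields (255 + 1) & 255 == 0 and decrementing
 * index 0 yields (0 - 1) & 255 == 255, so the index wraps in both
 * directions without any conditional.
 */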
struct iwl_cmd_meta {
@@ -315,6 +452,18 @@ enum iwl_shared_irq_flags {
};
/**
+ * enum iwl_image_response_code - image response values
+ * @IWL_IMAGE_RESP_DEF: the default value of the register
+ * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
+ * @IWL_IMAGE_RESP_FAIL: iml reading failed
+ */
+enum iwl_image_response_code {
+ IWL_IMAGE_RESP_DEF = 0,
+ IWL_IMAGE_RESP_SUCCESS = 1,
+ IWL_IMAGE_RESP_FAIL = 2,
+};
+
+/**
* struct iwl_dram_data
* @physical: page phy pointer
* @block: pointer to the allocated block/page
@@ -347,6 +496,12 @@ struct iwl_self_init_dram {
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
+ * @ctxt_info_gen3: context information for gen3 devices
+ * @prph_info: prph info for self init
+ * @prph_scratch: prph scratch for self init
+ * @prph_info_dma_addr: dma addr of prph info
+ * @prph_scratch_dma_addr: dma addr of prph scratch
* @ctxt_info_dma_addr: dma addr of context information
* @init_dram: DRAM data of firmware image (including paging).
* Context information addresses will be taken from here.
@@ -391,8 +546,16 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
- struct iwl_context_info *ctxt_info;
+ union {
+ struct iwl_context_info *ctxt_info;
+ struct iwl_context_info_gen3 *ctxt_info_gen3;
+ };
+ struct iwl_prph_info *prph_info;
+ struct iwl_prph_scratch *prph_scratch;
dma_addr_t ctxt_info_dma_addr;
+ dma_addr_t prph_info_dma_addr;
+ dma_addr_t prph_scratch_dma_addr;
+ dma_addr_t iml_dma_addr;
struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
@@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
return (void *)trans->trans_specific;
}
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+ struct msix_entry *entry)
+{
+ /*
+ * Before sending the interrupt the HW disables it to prevent
+ * a nested interrupt. This is done by writing 1 to the corresponding
+ * bit in the mask register. After handling the interrupt, it should be
+ * re-enabled by clearing this bit. This register is defined as a
+ * write-1-clear (W1C) register, meaning that the bit is cleared
+ * by writing 1 to it.
+ */
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
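/*
 * Illustrative example of the W1C semantics described above: after
 * servicing MSI-X vector 3, iwl_pcie_clear_irq() writes BIT(3) to
 * CSR_MSIX_AUTOMASK_ST_AD, which re-enables vector 3 only; all other
 * bits in the register are left untouched by the write.
 */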
+
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
@@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq);
/*****************************************************
* ICT - interrupt handling
@@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
+#define IWL_NUM_OF_COMPLETION_RINGS 31
+#define IWL_NUM_OF_TRANSFER_RINGS 527
+
+static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
+ int start)
+{
+ int i = 0;
+
+ while (start < fw->num_sec &&
+ fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+ fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
+static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const struct fw_desc *sec,
+ struct iwl_dram_data *dram)
+{
+ dram->block = dma_alloc_coherent(trans->dev, sec->len,
+ &dram->physical,
+ GFP_KERNEL);
+ if (!dram->block)
+ return -ENOMEM;
+
+ dram->size = sec->len;
+ memcpy(dram->block, sec->data, sec->len);
+
+ return 0;
+}
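/*
 * A minimal sketch, not taken from this patch, of how the two helpers
 * above combine when loading firmware sections into DRAM. The function
 * name is hypothetical; the real callers live in the context-info code.
 */
static int iwl_pcie_load_fw_sections_sketch(struct iwl_trans *trans,
					    const struct fw_img *fw,
					    struct iwl_dram_data *dram,
					    int start)
{
	int i, cnt = iwl_pcie_get_num_sections(fw, start);

	for (i = 0; i < cnt; i++) {
		/* allocate a coherent block and copy the section into it */
		int ret = iwl_pcie_ctxt_info_alloc_dma(trans,
						       &fw->sec[start + i],
						       &dram[i]);
		if (ret)
			return ret;
	}

	/* return the number of sections copied */
	return cnt;
}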
+
+static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ int i;
+
+ if (!dram->fw) {
+ WARN_ON(dram->fw_cnt);
+ return;
+ }
+
+ for (i = 0; i < dram->fw_cnt; i++)
+ dma_free_coherent(trans->dev, dram->fw[i].size,
+ dram->fw[i].block, dram->fw[i].physical);
+
+ kfree(dram->fw);
+ dram->fw_cnt = 0;
+ dram->fw = NULL;
+}
+
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
+static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
@@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
return txq->tfds + trans_pcie->tfd_size * idx;
}
+static inline const char *queue_name(struct device *dev,
+ struct iwl_trans_pcie *trans_p, int i)
+{
+ if (trans_p->shared_vec_mask) {
+ int vec = trans_p->shared_vec_mask &
+ IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+ if (i == 0)
+ return DRV_NAME ": shared IRQ";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i + vec);
+ }
+ if (i == 0)
+ return DRV_NAME ": default queue";
+
+ if (i == trans_p->alloc_vecs - 1)
+ return DRV_NAME ": exception";
+
+ return devm_kasprintf(dev, GFP_KERNEL,
+ DRV_NAME ": queue %d", i);
+}
+
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
+ int index = iwl_pcie_get_cmd_index(q, i);
+ int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
+ int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
}
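/*
 * Worked example of the occupancy check above, illustrative only: with
 * read_ptr == 250 and write_ptr == 4 on a 256-entry window the queue
 * has wrapped (w < r), so an index is "used" unless it lies in
 * [4, 250): indexes 251 and 2 are used, index 100 is not. Without the
 * wrap (r == 4, w == 250) the test reduces to r <= index < w.
 */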
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
@@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(const struct iwl_txq *q);
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif
+/* common functions that are used by gen3 transport */
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
+
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d15f5ba2dc77..d017aa2a0a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -18,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
@@ -37,6 +36,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
+#include "iwl-context-info-gen3.h"
/******************************************************************************
*
@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->cfg->mq_rx_supported) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ /* TODO: remove this for 22560 once fw does it */
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+ return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else if (trans->cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->mq_rx_supported)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ (rxq->write_actual |
+ ((FIRST_RX_QUEUE + rxq->id) << 16)));
+ else if (trans->cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
}
}
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+ struct iwl_rxq *rxq,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+ bd[rxq->write].type_n_size =
+ cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+ ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+ bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+ bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+ } else {
+ __le64 *bd = rxq->bd;
+
+ bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ }
+}
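/*
 * Worked example of the 22560 encoding above, illustrative only: the
 * >> 8 shift suggests the size field counts 256-byte units, so the 2K
 * buffer (IWL_RX_TD_SIZE_2K == BIT(11) == 2048) encodes as
 * 2048 >> 8 == 8, IWL_RX_TD_TYPE (0) contributes nothing to the top
 * byte, and every restocked buffer gets
 * type_n_size == cpu_to_le32(0x00000008).
 */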
+
/*
* iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
*/
@@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
spin_lock(&rxq->lock);
while (rxq->free_count) {
- __le64 *bd = (__le64 *)rxq->bd;
-
/* Get next free Rx buffer, remove from free list */
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
@@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
/* 12 first bits are expected to be empty */
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
- bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
rxq->free_count--;
}
@@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
- struct iwl_rxq *rxq)
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
@@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
}
-static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
@@ -608,89 +634,174 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct device *dev = trans->dev;
- int i;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ struct iwl_rx_transfer_desc *rx_td;
- if (WARN_ON(trans_pcie->rxq))
- return -EINVAL;
+ if (use_rx_td)
+ return sizeof(*rx_td);
+ else
+ return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ sizeof(__le32);
+}
- trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
- GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -EINVAL;
+static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct device *dev = trans->dev;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
+ int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+ if (rxq->bd)
+ dma_free_coherent(trans->dev,
+ free_size * rxq->queue_size,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ use_rx_td ? sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->rb_stts_dma = 0;
+ rxq->rb_stts = NULL;
+
+ if (rxq->used_bd)
+ dma_free_coherent(trans->dev,
+ (use_rx_td ? sizeof(*rxq->cd) :
+ sizeof(__le32)) * rxq->queue_size,
+ rxq->used_bd, rxq->used_bd_dma);
+ rxq->used_bd_dma = 0;
+ rxq->used_bd = NULL;
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ return;
- spin_lock_init(&rba->lock);
+ if (rxq->tr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->tr_tail, rxq->tr_tail_dma);
+ rxq->tr_tail_dma = 0;
+ rxq->tr_tail = NULL;
+
+ if (rxq->cr_tail)
+ dma_free_coherent(dev, sizeof(__le16),
+ rxq->cr_tail, rxq->cr_tail_dma);
+ rxq->cr_tail_dma = 0;
+ rxq->cr_tail = NULL;
+}
- for (i = 0; i < trans->num_rx_queues; i++) {
- struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+ int i;
+ int free_size;
+ bool use_rx_td = (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560);
- spin_lock_init(&rxq->lock);
- if (trans->cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
- else
- rxq->queue_size = RX_QUEUE_SIZE;
+ spin_lock_init(&rxq->lock);
+ if (trans->cfg->mq_rx_supported)
+ rxq->queue_size = MQ_RX_TABLE_SIZE;
+ else
+ rxq->queue_size = RX_QUEUE_SIZE;
- /*
- * Allocate the circular buffer of Read Buffer Descriptors
- * (RBDs)
- */
- rxq->bd = dma_zalloc_coherent(dev,
- free_size * rxq->queue_size,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err;
+ free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
- if (trans->cfg->mq_rx_supported) {
- rxq->used_bd = dma_zalloc_coherent(dev,
- sizeof(__le32) *
- rxq->queue_size,
- &rxq->used_bd_dma,
- GFP_KERNEL);
- if (!rxq->used_bd)
- goto err;
- }
+ /*
+ * Allocate the circular buffer of Read Buffer Descriptors
+ * (RBDs)
+ */
+ rxq->bd = dma_zalloc_coherent(dev,
+ free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err;
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma,
+ if (trans->cfg->mq_rx_supported) {
+ rxq->used_bd = dma_zalloc_coherent(dev,
+ (use_rx_td ?
+ sizeof(*rxq->cd) :
+ sizeof(__le32)) *
+ rxq->queue_size,
+ &rxq->used_bd_dma,
GFP_KERNEL);
- if (!rxq->rb_stts)
+ if (!rxq->used_bd)
goto err;
}
+
+ /* Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+ sizeof(__le16) :
+ sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma,
+ GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err;
+
+ if (!use_rx_td)
+ return 0;
+
+ /* Allocate the driver's pointer to TR tail */
+ rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->tr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->tr_tail)
+ goto err;
+
+ /* Allocate the driver's pointer to CR tail */
+ rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+ &rxq->cr_tail_dma,
+ GFP_KERNEL);
+ if (!rxq->cr_tail)
+ goto err;
+ /*
+ * W/A for a 22560 device step-Z0 HW bug: the value must be non-zero.
+ * TODO: remove this when we stop supporting Z0
+ */
+ *rxq->cr_tail = cpu_to_le16(500);
+
return 0;
err:
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(dev, free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
-
- if (rxq->used_bd)
- dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
}
kfree(trans_pcie->rxq);
return -ENOMEM;
}
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i, ret;
+
+ if (WARN_ON(trans_pcie->rxq))
+ return -EINVAL;
+
+ trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq)
+ return -EINVAL;
+
+ spin_lock_init(&rba->lock);
+
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
int i;
switch (trans_pcie->rx_buf_size) {
+ case IWL_AMSDU_2K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+ break;
case IWL_AMSDU_4K:
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
break;
@@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
iwl_pcie_enable_rx_wake(trans, true);
}
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
lockdep_assert_held(&rxq->lock);
@@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
rxq->used_count = 0;
}
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
WARN_ON(1);
return 0;
@@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = 0;
rxq->write = 0;
rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+ memset(rxq->rb_stts, 0,
+ (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
int i;
/*
@@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- if (rxq->bd)
- dma_free_coherent(trans->dev,
- free_size * rxq->queue_size,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(trans,
- "Free rxq->rb_stts which is NULL\n");
-
- if (rxq->used_bd)
- dma_free_coherent(trans->dev,
- sizeof(__le32) * rxq->queue_size,
- rxq->used_bd, rxq->used_bd_dma);
- rxq->used_bd_dma = 0;
- rxq->used_bd = NULL;
+ iwl_pcie_free_rxq_dma(trans, rxq);
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
@@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+ struct iwl_rxq *rxq, int i)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_mem_buffer *rxb;
+ u16 vid;
+
+ if (!trans->cfg->mq_rx_supported) {
+ rxb = rxq->queue[i];
+ rxq->queue[i] = NULL;
+ return rxb;
+ }
+
+ /* used_bd entries are 32 or 16 bits wide, but only 12 bits carry the vid */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ else
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+
+ if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ goto out_err;
+
+ rxb = trans_pcie->global_table[vid - 1];
+ if (rxb->invalid)
+ goto out_err;
+
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+
+ rxb->invalid = true;
+
+ return rxb;
+
+out_err:
+ WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+ iwl_force_nmi(trans);
+ return NULL;
+}
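/*
 * Illustrative note on the lookup above: the vid in the low 12 bits of
 * the RBD/CD is 1-based, so valid values 1..RX_POOL_SIZE map to
 * global_table[vid - 1]; vid 0 or anything beyond the table size is
 * treated as HW corruption and takes the NMI error path.
 */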
+
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
@@ -1250,7 +1385,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
@@ -1266,30 +1401,9 @@ restart:
if (unlikely(rxq->used_count == rxq->queue_size / 2))
emergency = true;
- if (trans->cfg->mq_rx_supported) {
- /*
- * used_bd is a 32 bit but only 12 are used to retrieve
- * the vid
- */
- u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
-
- if (WARN(!vid ||
- vid > ARRAY_SIZE(trans_pcie->global_table),
- "Invalid rxb index from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb = trans_pcie->global_table[vid - 1];
- if (WARN(rxb->invalid,
- "Invalid rxb from HW %u\n", (u32)vid)) {
- iwl_force_nmi(trans);
- goto out;
- }
- rxb->invalid = true;
- } else {
- rxb = rxq->queue[i];
- rxq->queue[i] = NULL;
- }
+ rxb = iwl_pcie_get_rxb(trans, rxq, i);
+ if (!rxb)
+ goto out;
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1331,6 +1445,9 @@ restart:
out:
/* Backtrack one entry */
rxq->read = i;
+ /* update cr tail with the rxq read pointer */
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ *rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
/*
@@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
- struct msix_entry *entry)
-{
- /*
- * Before sending the interrupt the HW disables it to prevent
- * a nested interrupt. This is done by writing 1 to the corresponding
- * bit in the mask register. After handling the interrupt, it should be
- * re-enabled by clearing this bit. This register is defined as
- * write 1 clear (W1C) register, meaning that it's being clear
- * by writing 1 to the bit.
- */
- iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
-}
-
/*
* iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
* This interrupt handler should be used with RSS queue only.
@@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- /* uCode wakes up after power-down sleep */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+ inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+ /* Reflect IML transfer status */
+ int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+ IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+ if (res == IWL_IMAGE_RESP_FAIL) {
+ isr_stats->sw++;
+ iwl_pcie_irq_handle_error(trans);
+ }
+ } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ /* uCode wakes up after power-down sleep */
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_pcie_rxq_check_wrptr(trans);
iwl_pcie_txq_check_wrptrs(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index b8e8dac2895d..2bc67219ed3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -53,6 +53,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
+#include "iwl-context-info-gen3.h"
#include "internal.h"
/*
@@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
}
iwl_pcie_ctxt_info_free_paging(trans);
- iwl_pcie_ctxt_info_free(trans);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ iwl_pcie_ctxt_info_gen3_free(trans);
+ else
+ iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- ret = iwl_pcie_ctxt_info_init(trans, fw);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+ ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
+ else
+ ret = iwl_pcie_ctxt_info_init(trans, fw);
if (ret)
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7229991ae70d..7d319b6863fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -84,6 +84,7 @@
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
+#include "fw/dbg.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
trans_pcie->fw_mon_size = 0;
}
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page *page = NULL;
@@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static struct iwl_causes_list causes_list_v2[] = {
+ {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
+ {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
+ {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
+ {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
+ {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
+ {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
+ {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
+ {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
+ {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+ {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+ {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+ {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+ {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i;
+ int i, arr_size =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
+ for (i = 0; i < arr_size; i++) {
+ struct iwl_causes_list *causes =
+ (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+ causes_list : causes_list_v2;
+
+ iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+ iwl_clear_bit(trans, causes[i].mask_reg,
+ causes[i].cause_num);
}
}
@@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_enable_rx_wake(trans, true);
- /*
- * Reconfigure IVAR table in case of MSIX or reset ict table in
- * MSI mode since HW reset erased it.
- * Also enables interrupts - none will happen as
- * the device doesn't know we're waking it up, only when
- * the opmode actually tells it after this call.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
- if (!trans_pcie->msix_enabled)
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
-
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}
+ /*
+ * Reconfigure IVAR table in case of MSIX or reset ict table in
+ * MSI mode since HW reset erased it.
+ * Also enables interrupts - none will happen as
+ * the device doesn't know we're waking it up, only when
+ * the opmode actually tells it after this call.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+ if (!trans_pcie->msix_enabled)
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+
iwl_pcie_set_pwr(trans, false);
if (!reset) {
@@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
}
}
-static const char *queue_name(struct device *dev,
- struct iwl_trans_pcie *trans_p, int i)
-{
- if (trans_p->shared_vec_mask) {
- int vec = trans_p->shared_vec_mask &
- IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
- if (i == 0)
- return DRV_NAME ": shared IRQ";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i + vec);
- }
- if (i == 0)
- return DRV_NAME ": default queue";
-
- if (i == trans_p->alloc_vecs - 1)
- return DRV_NAME ": exception";
-
- return devm_kasprintf(dev, GFP_KERNEL,
- DRV_NAME ": queue %d", i);
-}
-
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
@@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (TFD_QUEUE_SIZE_MAX - 1),
+ (trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+ return -EINVAL;
+
+ data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+ data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+ data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+ data->fr_bd_wid = 0;
+
+ return 0;
+}
+
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
rxq->free_count);
if (rxq->rb_stts) {
+ u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+ rxq));
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) &
- 0x0FFF);
+ r & 0x0FFF);
} else {
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: Not Allocated\n");
@@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs;
+ u32 len, num_rbs = 0;
u32 monitor_len;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->cfg->mq_rx_supported;
+ !trans->cfg->mq_rx_supported &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
/* transport dump header */
len = sizeof(*dump_data);
@@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data
}
if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+ if (!(trans->dbg_dump_mask &
+ BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+ return NULL;
+
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
@@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data
}
/* CSR registers */
- len += sizeof(*data) + IWL_CSR_TO_DUMP;
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += sizeof(*data) + IWL_CSR_TO_DUMP;
/* FH registers */
- if (trans->cfg->gen2)
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
- else
- len += sizeof(*data) +
- (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+ if (trans->cfg->gen2)
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND_GEN2 -
+ FH_MEM_LOWER_BOUND_GEN2);
+ else
+ len += sizeof(*data) +
+ (FH_MEM_UPPER_BOUND -
+ FH_MEM_LOWER_BOUND);
+ }
if (dump_rbs) {
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
- & 0x0FFF;
+ num_rbs =
+ le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+ & 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
@@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data
}
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2)
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data
len = 0;
data = (void *)dump_data->data;
- data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
- txcmd = (void *)data->data;
- spin_lock_bh(&cmdq->lock);
- ptr = cmdq->write_ptr;
- for (i = 0; i < cmdq->n_window; i++) {
- u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
- u32 caplen, cmdlen;
-
- cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
- trans_pcie->tfd_size * ptr);
- caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
- if (cmdlen) {
- len += sizeof(*txcmd) + caplen;
- txcmd->cmdlen = cpu_to_le32(cmdlen);
- txcmd->caplen = cpu_to_le32(caplen);
- memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
- txcmd = (void *)((u8 *)txcmd->data + caplen);
+
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+ u16 tfd_size = trans_pcie->tfd_size;
+
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+ cmdq->tfds +
+ tfd_size * ptr);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd,
+ caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(trans, ptr);
}
+ spin_unlock_bh(&cmdq->lock);
- ptr = iwl_queue_dec_wrap(ptr);
+ data->len = cpu_to_le32(len);
+ len += sizeof(*data);
+ data = iwl_fw_error_next_data(data);
}
- spin_unlock_bh(&cmdq->lock);
- data->len = cpu_to_le32(len);
- len += sizeof(*data);
- data = iwl_fw_error_next_data(data);
-
- len += iwl_trans_pcie_dump_csr(trans, &data);
- len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ len += iwl_trans_pcie_dump_csr(trans, &data);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+ len += iwl_trans_pcie_fh_regs_dump(trans, &data);
if (dump_rbs)
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2) {
+ if (trans->cfg->gen2 &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
dma_addr_t addr =
@@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data
len += sizeof(*data) + sizeof(*paging) + page_len;
}
}
-
- len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+ if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
@@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
.txq_free = iwl_trans_pcie_dyn_txq_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+ .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) {
+
+ if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
- if (hw_status & UMAG_GEN_HW_IS_FPGA)
- trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
- else
+ if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
+ /*
+ * B-step FW is the same for the physical card and the FPGA
+ */
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+ CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+ } else {
+ /*
+ * A-step, no FPGA
+ */
trans->cfg = &iwl22000_2ac_cfg_hr;
+ }
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 48890a1c825f..b99f33ff9123 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +52,7 @@
*****************************************************************************/
#include <linux/pm_runtime.h>
#include <net/tso.h>
+#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
@@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
- len = DIV_ROUND_UP(len, 4);
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
return;
@@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ else
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
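/*
 * Worked example of the byte-count encoding above, illustrative only,
 * assuming bc_table_dword is set and filled_tfd_size == 72: a
 * 1500-byte frame gives len = DIV_ROUND_UP(1500, 4) == 375 (0x177),
 * num_fetch_chunks = DIV_ROUND_UP(72, 64) - 1 == 1, and therefore
 * bc_ent = cpu_to_le16(0x177 | (1 << 12)) == cpu_to_le16(0x1177).
 */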
/*
@@ -355,52 +365,89 @@ out_err:
return -EINVAL;
}
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
- bool amsdu;
- int i, len, tb1_len, tb2_len, hdr_len;
+ int len;
void *tb1_addr;
- memset(tfd, 0, sizeof(*tfd));
+ tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+ len + IWL_FIRST_TB_SIZE,
+ hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int i, len, tb1_len, tb2_len;
+ void *tb1_addr;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
/* The first TB points to bi-directional DMA data */
- if (!amsdu)
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
- /* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
/*
* The second TB (tb1) points to the remainder of the TX command
* and the 802.11 header - dword aligned size
* (This calculation modifies the TX command, so do it before the
* setup of the first TB)
*/
- len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
- ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
- /* do not align A-MSDU to dword as the subframe header aligns it */
- if (amsdu)
- tb1_len = len;
- else
- tb1_len = ALIGN(len, 4);
+ tb1_len = ALIGN(len, 4);
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- if (amsdu) {
- if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
- tb1_len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /*
- * building the A-MSDU might have changed this data, so memcpy
- * it now
- */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
- return tfd;
- }
-
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@@ -467,13 +497,50 @@ out_err:
return NULL;
}
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (amsdu)
+ return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+
+ return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len);
+}
+
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ u16 cmd_len;
int idx;
void *tfd;
@@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
iwl_pcie_gen2_get_num_tbs(trans, tfd));
/* start timer if queue currently empty */
@@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
@@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
iwl_trans_ref(trans);
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+ (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
- wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
+ wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 473fe7ccb07c..93f0d387688a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,27 +71,28 @@
*
***************************************************/
-int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* To avoid ambiguity between empty and completely full queues, there
- * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
- * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < TFD_QUEUE_SIZE_MAX)
+ if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
max = q->n_window;
else
- max = TFD_QUEUE_SIZE_MAX - 1;
+ max = trans->cfg->base_params->max_tfd_queue_size - 1;
/*
- * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
- * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
*/
- used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->cfg->base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
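/*
 * Worked example, illustrative only: with max_tfd_queue_size == 256,
 * write_ptr == 10 and read_ptr == 250, the masked subtraction gives
 * used = (10 - 250) & 255 == 16, so the wrapped case needs no branch.
 */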
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
+ u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
txq->need_update = false;
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ /* max_tfd_queue_size must be a power of two, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
/* Initialize queue's high/low-water marks, and head/tail indexes */
ret = iwl_queue_init(txq, slots_num);
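Why the check changed form: BUILD_BUG_ON() only works on compile-time constants, and the queue size is now chosen per device family at probe. A minimal sketch contrasting the two idioms (FIXED_QUEUE_SIZE is hypothetical):

#include <linux/bug.h>

#define FIXED_QUEUE_SIZE 256    /* hypothetical compile-time constant */

static int check_queue_size(u32 runtime_size)
{
        /* Constant: rejected at build time if not a power of two. */
        BUILD_BUG_ON(FIXED_QUEUE_SIZE & (FIXED_QUEUE_SIZE - 1));

        /* Runtime value: the best we can do is warn once and fail. */
        if (WARN_ONCE(runtime_size & (runtime_size - 1),
                      "queue size %u is not a power of two", runtime_size))
                return -EINVAL;
        return 0;
}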
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ trans_pcie->tfd_size *
+ trans->cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
- u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ sizeof(struct iwl_gen3_bc_tbl) :
+ sizeof(struct iwlagn_scd_bc_tbl);
/* It is not allowed to alloc twice, so warn when this happens.

* We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- scd_bc_tbls_size);
+ bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;
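The same family-dependent element size is computed here and in the gen2 dyn_txq_alloc hunk further up; a small helper expressing the shared pattern — a suggestion, not something the patch introduces:

/* Sketch: byte-count table element size by hardware generation. */
static size_t iwl_bc_tbl_entry_size(const struct iwl_trans *trans)
{
        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
                return sizeof(struct iwl_gen3_bc_tbl);
        return sizeof(struct iwlagn_scd_bc_tbl);
}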
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
- int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+ int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+ int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
- if (txq->read_ptr == tfd_num)
+ if (read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/* Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(tfd_num);
+ last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, last_to_free,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
- txq->read_ptr != tfd_num;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
+ read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+ read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
- txq->entries[idx].skb = NULL;
+ txq->entries[read_ptr].skb = NULL;
if (!trans->cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(txq) > txq->low_mark &&
+ if (iwl_queue_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) > txq->low_mark)
+ if (iwl_queue_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
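Every iwl_queue_inc_wrap()/iwl_queue_dec_wrap() call now takes the transport so the helpers can mask against the per-device size; the likely shape of the increment helper, inferred from its call sites rather than copied from the tree:

/* Assumed implementation, reconstructed from the callers above. */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
        return ++index &
               (trans->cfg->base_params->max_tfd_queue_size - 1);
}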
@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
+ u16 r;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
+ idx = iwl_pcie_get_cmd_index(txq, idx);
+ r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+ if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+ (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+ __func__, txq_id, idx,
+ trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
- txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+ r = iwl_queue_inc_wrap(trans, r)) {
+ txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, txq->write_ptr, txq->read_ptr);
+ idx, txq->write_ptr, r);
iwl_force_nmi(trans);
}
}
@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(txq) < txq->high_mark) {
+ if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(txq) < 3)) {
+ if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index d1884b8913e7..0094b1d2b577 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -66,7 +66,7 @@ static void prism2_send_mgmt(struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
struct ap_data *ap = PDE_DATA(file_inode(m->file));
@@ -81,8 +81,7 @@ static int ap_debug_proc_show(struct seq_file *m, void *v)
seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
{
@@ -990,7 +989,7 @@ static void prism2_send_mgmt(struct net_device *dev,
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
+#ifdef CONFIG_PROC_FS
static int prism2_sta_proc_show(struct seq_file *m, void *v)
{
struct sta_info *sta = m->private;
@@ -1059,6 +1058,7 @@ static int prism2_sta_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static void handle_add_proc_queue(struct work_struct *work)
{
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 2720aa39f530..ad1aa65fee7f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -151,13 +151,6 @@ static int prism2_get_ram_size(local_info_t *local);
#define HFA384X_MAGIC 0x8A32
#endif
-
-static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
-{
- return HFA384X_INW(reg);
-}
-
-
static void hfa384x_read_regs(struct net_device *dev,
struct hfa384x_regs *regs)
{
@@ -2897,7 +2890,12 @@ static void hostap_tick_timer(struct timer_list *t)
}
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
+static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
+{
+ return HFA384X_INW(reg);
+}
+
static int prism2_registers_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -2951,8 +2949,7 @@ static int prism2_registers_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
struct set_tim_data {
struct list_head list;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 5b33ccab9188..703d74cea3c2 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -11,8 +11,7 @@
#define PROC_LIMIT (PAGE_SIZE - 80)
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
+#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int prism2_debug_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -43,9 +42,9 @@ static int prism2_debug_proc_show(struct seq_file *m, void *v)
return 0;
}
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
+#endif
+#ifdef CONFIG_PROC_FS
static int prism2_stats_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -82,6 +81,7 @@ static int prism2_stats_proc_show(struct seq_file *m, void *v)
return 0;
}
+#endif
static int prism2_wds_proc_show(struct seq_file *m, void *v)
{
@@ -174,6 +174,7 @@ static const struct seq_operations prism2_bss_list_proc_seqops = {
.show = prism2_bss_list_proc_show,
};
+#ifdef CONFIG_PROC_FS
static int prism2_crypt_proc_show(struct seq_file *m, void *v)
{
local_info_t *local = m->private;
@@ -190,6 +191,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
}
return 0;
}
+#endif
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
size_t count, loff_t *_pos)
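All three hostap files follow the same rule: a seq_file show routine that is referenced only from proc registration code becomes an unused-function warning when CONFIG_PROC_FS is off, so its definition is fenced with the same condition as its only user. A stand-alone sketch of the pattern (names hypothetical):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#ifdef CONFIG_PROC_FS
static int example_proc_show(struct seq_file *m, void *v)
{
        seq_puts(m, "debug counters would go here\n");
        return 0;
}
#endif

static void example_register(struct proc_dir_entry *dir)
{
#ifdef CONFIG_PROC_FS
        /* Only reference to the show routine; guarded identically. */
        proc_create_single("example", 0444, dir, example_proc_show);
#endif
}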
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 5d75c971004b..e2addd8b878b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
"Send delba to tid=%d, %pM\n",
tid, rx_reor_tbl_ptr->ta);
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
- goto exit;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
+ return;
}
}
-exit:
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
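The fix's shape in isolation — once the shared exit label is gone, each early return inside the locked walk must drop the lock itself. A self-contained sketch (predicate and action hypothetical):

static void delba_sketch(struct mwifiex_private *priv, int tid)
{
        struct mwifiex_rx_reorder_tbl *tbl;
        unsigned long flags;

        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
                if (tbl->tid == tid) {
                        /* ...send the delba here... */
                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
                                               flags);
                        return; /* lock released on this path too */
                }
        }
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}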
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 7ab44cd32a9d..8e63d14c1e1c 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
{
int pkt_to_send, i;
void *rx_tmp_ptr;
- unsigned long flags;
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
tbl->win_size;
for (i = 0; i < pkt_to_send; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
}
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
if (rx_tmp_ptr)
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
}
tbl->start_win = start_win;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
@@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* The start window is adjusted automatically when a hole is located.
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -157,21 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
{
int i, j, xchg;
void *rx_tmp_ptr;
- unsigned long flags;
for (i = 0; i < tbl->win_size; ++i) {
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
- if (!tbl->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
+ if (!tbl->rx_reorder_ptr[i])
break;
- }
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
- spin_lock_irqsave(&priv->rx_pkt_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* circular buffer
@@ -184,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
}
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
- spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}
/*
@@ -192,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
*
* The function stops the associated timer and dispatches all the
* pending packets in the Rx reorder table before deletion.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -217,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
-
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
@@ -234,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
/*
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
return tbl;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return NULL;
}
@@ -266,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
return;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
- if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
+ if (!memcmp(tbl->ta, ta, ETH_ALEN))
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
- }
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
@@ -282,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
- struct mwifiex_private *priv = ctx->priv;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
- if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
+ if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
return i;
- }
- }
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return -1;
}
@@ -317,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t)
struct reorder_tmr_cnxt *ctx =
from_timer(ctx, t, timer);
int start_win, seq_num;
+ unsigned long flags;
ctx->timer_is_set = false;
+ spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
seq_num = mwifiex_11n_find_last_seq_num(ctx);
- if (seq_num < 0)
+ if (seq_num < 0) {
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
return;
+ }
mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
+ spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -354,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
* If we get a TID, ta pair which is already present, dispatch all
* the packets and move the window size until the ssn
*/
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node)
@@ -569,16 +551,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int prev_start_win, start_win, end_win, win_size;
u16 pkt_index;
bool init_window_shift = false;
+ unsigned long flags;
int ret = 0;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (pkt_type != PKT_TYPE_BAR)
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
@@ -665,6 +651,8 @@ done:
if (!tbl->timer_context.timer_is_set ||
prev_start_win != tbl->start_win)
mwifiex_11n_rxreorder_timer_restart(tbl);
+
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
@@ -693,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, EVENT,
"event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
@@ -734,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
+ unsigned long flags;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -747,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return 0;
}
win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl) {
@@ -768,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
else
tbl->amsdu = false;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_dbg(priv->adapter, CMD,
"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -807,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
- &priv->rx_reorder_tbl_ptr, list) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ &priv->rx_reorder_tbl_ptr, list)
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- }
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -935,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
int tlv_buf_left = len;
int ret;
u8 *tmp;
+ unsigned long flags;
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
@@ -954,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
tlv_bitmap_len);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_reor_tbl_ptr =
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
tlv_rxba->mac);
if (!rx_reor_tbl_ptr) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_dbg(priv->adapter, ERROR,
"Can not find rx_reorder_tbl!");
return;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
for (i = 0; i < tlv_bitmap_len; i++) {
for (j = 0 ; j < 8; j++) {
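The new contract ("The caller must hold rx_reorder_tbl_lock") is enforced only by comment; lockdep_assert_held() would make it checkable at runtime on lockdep kernels. A sketch of how the lookup could assert it — a suggestion, not part of this patch:

#include <linux/lockdep.h>

static struct mwifiex_rx_reorder_tbl *
rx_reorder_lookup_locked(struct mwifiex_private *priv, int tid, const u8 *ta)
{
        struct mwifiex_rx_reorder_tbl *tbl;

        /* Splats under CONFIG_LOCKDEP if a caller forgot the lock. */
        lockdep_assert_held(&priv->rx_reorder_tbl_lock);

        list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
                if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
                        return tbl;
        return NULL;
}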
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index c02e02c17c9c..adc88433faa8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2322,7 +2322,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
if (priv->scan_block)
priv->scan_block = false;
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: Ignore connection.\t"
"Card removed or FW in bad state\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9cfcdf6bec52..60db2b969e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -372,7 +372,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
adapter->ps_state = PS_STATE_SLEEP_CFM;
if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
- (adapter->is_hs_configured &&
+ (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
mwifiex_hs_activated_event(mwifiex_get_priv
@@ -564,25 +564,26 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: device in suspended state\n");
return -1;
}
- if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+ if (test_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags) &&
+ cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: host entering sleep state\n");
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: card is removed\n");
return -1;
}
- if (adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"PREP_CMD: FW is in bad state\n");
return -1;
@@ -789,7 +790,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
if (priv && (host_cmd->command !=
cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event(priv, false);
}
}
@@ -825,7 +827,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return -1;
}
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
@@ -927,7 +929,7 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
struct cmd_ctrl_node *cmd_node;
- adapter->is_cmd_timedout = 1;
+ set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
if (!adapter->curr_cmd) {
mwifiex_dbg(adapter, ERROR,
"cmd: empty curr_cmd\n");
@@ -953,7 +955,8 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
mwifiex_dbg(adapter, MSG,
"is_cmd_timedout = %d\n",
- adapter->is_cmd_timedout);
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags));
mwifiex_dbg(adapter, MSG,
"num_tx_timeout = %d\n",
adapter->dbg.num_tx_timeout);
@@ -1135,7 +1138,8 @@ void
mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
{
if (activated) {
- if (priv->adapter->is_hs_configured) {
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &priv->adapter->work_flags)) {
priv->adapter->hs_activated = true;
mwifiex_update_rxreor_flags(priv->adapter,
RXREOR_FORCE_NO_DROP);
@@ -1186,11 +1190,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
phs_cfg->params.hs_config.gap);
}
if (conditions != HS_CFG_CANCEL) {
- adapter->is_hs_configured = true;
+ set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->iface_type == MWIFIEX_USB)
mwifiex_hs_activated_event(priv, true);
} else {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->hs_activated)
mwifiex_hs_activated_event(priv, false);
}
@@ -1212,8 +1216,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
- adapter->is_hs_configured = false;
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
false);
@@ -1273,7 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
return;
}
adapter->pm_wakeup_card_req = true;
- if (adapter->is_hs_configured)
+ if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
mwifiex_hs_activated_event(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
true);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 07453932f703..cce70252fd96 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -813,7 +813,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
MWIFIEX_SYNC_CMD, &hscfg);
mwifiex_enable_hs(priv->adapter);
- priv->adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &priv->adapter->work_flags);
ret = count;
done:
kfree(buf);
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index b10baacb51c9..75cbd609d606 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -355,8 +355,14 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
case WLAN_EID_HT_OPERATION:
case WLAN_EID_VHT_CAPABILITY:
case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_VENDOR_SPECIFIC:
break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ /* Skip only Microsoft WMM IE */
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+ (const u8 *)hdr,
+ hdr->len + sizeof(struct ieee_types_header)))
+ break;
default:
memcpy(gen_ie->ie_buffer + ie_len, hdr,
hdr->len + sizeof(struct ieee_types_header));
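cfg80211_find_vendor_ie() scans a buffer for a vendor IE with the given OUI and type and returns a pointer into it (or NULL), so pointing it at a single element and that element's own length turns it into an "is this exactly IE X?" test. A sketch of the idiom (helper name hypothetical):

#include <net/cfg80211.h>

/* Sketch: true iff hdr is the Microsoft WMM vendor IE. */
static bool ie_is_wmm(const struct ieee_types_header *hdr)
{
        int len = hdr->len + sizeof(*hdr);

        return cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
                                       WLAN_OUI_TYPE_MICROSOFT_WMM,
                                       (const u8 *)hdr, len) != NULL;
}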
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index d239e9248c05..673e89dff0b5 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -233,7 +233,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->event_received = false;
adapter->data_received = false;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
@@ -270,7 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
@@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
- spin_lock_init(&priv->rx_pkt_lock);
spin_lock_init(&priv->wmm.ra_list_spinlock);
spin_lock_init(&priv->curr_bcn_buf_lock);
spin_lock_init(&priv->sta_list_spinlock);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index fa3e8ddfe9a9..20cee5c397fb 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -404,7 +404,8 @@ process_start:
!skb_queue_empty(&adapter->tx_data_q)) {
mwifiex_process_tx_queue(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -420,7 +421,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_process_bypass_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -435,7 +437,8 @@ process_start:
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
- adapter->is_hs_configured = false;
+ clear_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
mwifiex_hs_activated_event
(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -647,7 +650,7 @@ err_dnld_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
@@ -870,7 +873,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
"data: %lu BSS(%d-%d): Data <= kernel\n",
jiffies, priv->bss_type, priv->bss_num);
- if (priv->adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) {
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
@@ -1372,7 +1375,7 @@ static void mwifiex_rx_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, rx_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_process_rx(adapter);
}
@@ -1388,7 +1391,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, main_work);
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return;
mwifiex_main_process(adapter);
}
@@ -1405,7 +1408,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
adapter->int_status = 0;
@@ -1493,11 +1496,11 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
adapter->if_ops.up_dev(adapter);
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
- adapter->is_cmd_timedout = 0;
+ clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
adapter->cmd_wait_q.status = 0;
@@ -1552,7 +1555,7 @@ err_init_fw:
adapter->if_ops.unregister_dev(adapter);
err_kmalloc:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
mwifiex_dbg(adapter, ERROR,
@@ -1649,9 +1652,9 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->fw_done = fw_done;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
- adapter->surprise_removed = false;
+ clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
init_waitqueue_head(&adapter->init_wait_q);
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
init_waitqueue_head(&adapter->cmd_wait_q.wait);
@@ -1699,7 +1702,7 @@ err_init_fw:
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
err_registerdev:
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
mwifiex_terminate_workqueue(adapter);
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 69ac0a22c28c..b025ba164412 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -517,6 +517,14 @@ enum mwifiex_iface_work_flags {
MWIFIEX_IFACE_WORK_CARD_RESET,
};
+enum mwifiex_adapter_work_flags {
+ MWIFIEX_SURPRISE_REMOVED,
+ MWIFIEX_IS_CMD_TIMEDOUT,
+ MWIFIEX_IS_SUSPENDED,
+ MWIFIEX_IS_HS_CONFIGURED,
+ MWIFIEX_IS_HS_ENABLING,
+};
+
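Collapsing the scattered booleans into one unsigned long is what enables test_bit()/set_bit()/clear_bit(): the bit operators are atomic read-modify-writes, so the flags can be flipped from timers, workqueues and completion handlers without an extra lock. A minimal stand-alone sketch:

#include <linux/bitops.h>
#include <linux/errno.h>

enum ex_work_flags {            /* hypothetical mirror of the new enum */
        EX_SURPRISE_REMOVED,
        EX_IS_SUSPENDED,
};

struct ex_adapter {
        unsigned long work_flags;
};

static int ex_suspend(struct ex_adapter *ad)
{
        if (test_bit(EX_SURPRISE_REMOVED, &ad->work_flags))
                return -ENODEV;
        set_bit(EX_IS_SUSPENDED, &ad->work_flags);      /* atomic RMW */
        return 0;
}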
struct mwifiex_band_config {
u8 chan_band:2;
u8 chan_width:2;
@@ -616,9 +624,6 @@ struct mwifiex_private {
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
- /* spin lock for Rx packets */
- spinlock_t rx_pkt_lock;
-
#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
u32 assoc_rsp_size;
@@ -875,7 +880,7 @@ struct mwifiex_adapter {
struct device *dev;
struct wiphy *wiphy;
u8 perm_addr[ETH_ALEN];
- bool surprise_removed;
+ unsigned long work_flags;
u32 fw_release_number;
u8 intf_hdr_len;
u16 init_wait_q_woken;
@@ -929,7 +934,6 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u8 is_cmd_timedout;
u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
@@ -979,13 +983,10 @@ struct mwifiex_adapter {
u16 pps_uapsd_mode;
u32 pm_wakeup_fw_try;
struct timer_list wakeup_timer;
- u8 is_hs_configured;
struct mwifiex_hs_config_param hs_cfg;
u8 hs_activated;
u16 hs_activate_wait_q_woken;
wait_queue_head_t hs_activate_wait_q;
- bool is_suspended;
- bool hs_enabling;
u8 event_body[MAX_EVENT_SIZE];
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c42b7296ddd..3fe81b2a929a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -170,7 +170,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -178,8 +178,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
flush_workqueue(adapter->workqueue);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return 0;
}
@@ -207,13 +207,13 @@ static int mwifiex_pcie_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
@@ -2430,7 +2430,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
}
adapter = card->adapter;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
goto exit;
if (card->msix_enable)
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 895b806cdb03..8e483b0bc3b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1495,7 +1495,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
return -EBUSY;
}
- if (adapter->surprise_removed || adapter->is_cmd_timedout) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
+ test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"Ignore scan. Card removed or firmware in bad state\n");
return -EFAULT;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index dfdcbc4f141a..d49fbd58afa7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,13 +181,13 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
- if (!adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, WARN,
"device already resumed\n");
return 0;
}
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -260,7 +260,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
@@ -450,7 +450,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_disable_wake(adapter);
return -EFAULT;
}
@@ -460,8 +460,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
/* Indicate device suspended */
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 03a6492662ca..a327fc5b36e3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -224,7 +224,8 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
adapter->tx_lock_flag = false;
adapter->pps_uapsd_mode = false;
- if (adapter->is_cmd_timedout && adapter->curr_cmd)
+ if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags) &&
+ adapter->curr_cmd)
return;
priv->media_connected = false;
mwifiex_dbg(adapter, MSG,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 5414b755cf82..b454b5f85503 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -419,7 +419,8 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
}
if (hs_cfg->is_invoke_hostcmd) {
if (hs_cfg->conditions == HS_CFG_CANCEL) {
- if (!adapter->is_hs_configured)
+ if (!test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags))
/* Already cancelled */
break;
/* Save previous condition */
@@ -535,7 +536,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
memset(&hscfg, 0, sizeof(hscfg));
hscfg.is_invoke_hostcmd = true;
- adapter->hs_enabling = true;
+ set_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
mwifiex_cancel_all_pending_cmd(adapter);
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
@@ -601,7 +602,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
else
info->wep_status = false;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->is_deep_sleep = adapter->is_deep_sleep;
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 620f8650a742..37c24b95e642 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -143,7 +143,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
int ret;
struct mwifiex_txinfo *tx_info = NULL;
- if (adapter->surprise_removed)
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
return -1;
if (!priv->media_connected)
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 5ce85d5727e4..a83c5afc256a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
ret = mwifiex_handle_uap_rx_forward(priv, skb);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 88f4c89f89ba..433c6a16870b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -181,7 +181,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
atomic_dec(&card->rx_data_urb_pending);
if (recv_length) {
- if (urb->status || (adapter->surprise_removed)) {
+ if (urb->status ||
+ test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"URB status is failed: %d\n", urb->status);
/* Do not free skb in case of command ep */
@@ -218,10 +219,10 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
dev_kfree_skb_any(skb);
}
} else if (urb->status) {
- if (!adapter->is_suspended) {
+ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, FATAL,
"Card is removed: %d\n", urb->status);
- adapter->surprise_removed = true;
+ set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
}
dev_kfree_skb_any(skb);
return;
@@ -529,7 +530,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
- if (unlikely(adapter->is_suspended))
+ if (unlikely(test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)))
mwifiex_dbg(adapter, WARN,
"Device already suspended\n");
@@ -537,19 +538,19 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
"cmd: failed to suspend\n");
- adapter->hs_enabling = false;
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
return -EFAULT;
}
- /* 'is_suspended' flag indicates device is suspended.
+ /* 'MWIFIEX_IS_SUSPENDED' bit indicates device is suspended.
* It must be set here before the usb_kill_urb() calls. Reason
* is in the complete handlers, urb->status(= -ENOENT) and
* this flag is used in combination to distinguish between a
* 'suspended' state and a 'disconnect' one.
*/
- adapter->is_suspended = true;
- adapter->hs_enabling = false;
+ set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+ clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
usb_kill_urb(card->rx_cmd.urb);
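Per the comment just rewritten, the suspended bit must be set before usb_kill_urb() so completion handlers can tell a deliberate kill (-ENOENT while suspended) from a surprise unplug. A sketch of a handler making that distinction (handler body hypothetical, not the driver's actual code):

static void ex_rx_complete(struct urb *urb)
{
        struct mwifiex_adapter *adapter = urb->context;

        if (urb->status == -ENOENT &&
            test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))
                return;         /* URB killed for suspend, not an unplug */

        if (urb->status)
                set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
}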
@@ -593,7 +594,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
}
adapter = card->adapter;
- if (unlikely(!adapter->is_suspended)) {
+ if (unlikely(!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) {
mwifiex_dbg(adapter, WARN,
"Device already resumed\n");
return 0;
@@ -602,7 +603,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
/* Indicate device resumed. The netdev queue will be resumed only
* after the urbs have been re-submitted
*/
- adapter->is_suspended = false;
+ clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
if (!atomic_read(&card->rx_data_urb_pending))
for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
@@ -1158,13 +1159,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
unsigned long flags;
int idx, ret;
- if (adapter->is_suspended) {
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
}
- if (adapter->surprise_removed) {
+ if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
return -1;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6dd212898117..f9b71539d33e 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -197,9 +197,11 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->is_deep_sleep = adapter->is_deep_sleep;
info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
- info->is_hs_configured = adapter->is_hs_configured;
+ info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
+ &adapter->work_flags);
info->hs_activated = adapter->hs_activated;
- info->is_cmd_timedout = adapter->is_cmd_timedout;
+ info->is_cmd_timedout = test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
+ &adapter->work_flags);
info->num_cmd_host_to_card_failure
= adapter->dbg.num_cmd_host_to_card_failure;
info->num_cmd_sleep_cfm_host_to_card_failure
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 936a0a841af8..407b9932ca4d 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -599,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
if (priv->adapter->if_ops.clean_pcie_ring &&
- !priv->adapter->surprise_removed)
+ !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index fc05d79c80d0..850611ad347a 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,10 +1,36 @@
config MT76_CORE
tristate
+config MT76_USB
+ tristate
+ depends on MT76_CORE
+
+config MT76x2_COMMON
+ tristate
+ depends on MT76_CORE
+
+config MT76x0U
+ tristate "MediaTek MT76x0U (USB) support"
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7610U-based wireless USB dongles.
+
config MT76x2E
tristate "MediaTek MT76x2E (PCIe) support"
select MT76_CORE
+ select MT76x2_COMMON
depends on MAC80211
depends on PCI
---help---
This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76_CORE
+ select MT76_USB
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index a0156bc01dea..158d10d2716c 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,15 +1,31 @@
obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76x0U) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+mt76-usb-y := usb.o usb_trace.o usb_mcu.o
+
CFLAGS_trace.o := -I$(src)
+CFLAGS_usb_trace.o := -I$(src)
+
+mt76x2-common-y := \
+ mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
+ mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
+ mt76x2_debugfs.o
mt76x2e-y := \
mt76x2_pci.o mt76x2_dma.o \
- mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
- mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+ mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
+ mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
mt76x2_dfs.o mt76x2_trace.o
+mt76x2u-y := \
+ mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
+ mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
+
CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 1e8cdce919d9..73c8b2805c97 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -113,7 +113,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
if (nframes)
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
REORDER_TIMEOUT);
- mt76_rx_complete(dev, &frames, -1);
+ mt76_rx_complete(dev, &frames, NULL);
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3dbedcedc2c4..c51da2205b93 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -239,6 +239,80 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
iowrite32(q->head, &q->regs->cpu_idx);
}
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue_entry e;
+ struct mt76_txwi_cache *t;
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *iter;
+ dma_addr_t addr;
+ int len;
+ u32 tx_info = 0;
+ int n, ret;
+
+ t = mt76_get_txwi(dev);
+ if (!t) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+ &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto free;
+
+ len = skb->len - skb->data_len;
+ addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ n = 0;
+ buf[n].addr = t->dma_addr;
+ buf[n++].len = dev->drv->txwi_size;
+ buf[n].addr = addr;
+ buf[n++].len = len;
+
+ skb_walk_frags(skb, iter) {
+ if (n == ARRAY_SIZE(buf))
+ goto unmap;
+
+ addr = dma_map_single(dev->dev, iter->data, iter->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ goto unmap;
+
+ buf[n].addr = addr;
+ buf[n++].len = iter->len;
+ }
+
+ if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ goto unmap;
+
+ return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+ ret = -ENOMEM;
+ for (n--; n > 0; n--)
+ dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+ DMA_TO_DEVICE);
+
+free:
+ e.skb = skb;
+ e.txwi = t;
+ dev->drv->tx_complete_skb(dev, q, &e, true);
+ mt76_put_txwi(dev, t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
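Note the unwind in mt76_dma_tx_queue_skb(): buf[0] holds the long-lived txwi mapping, so the failure loop walks back only to index 1 and never unmaps slot 0. The idiom in isolation (struct and names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct ex_buf {                 /* hypothetical mirror of mt76_queue_buf */
        void *data;
        dma_addr_t addr;
        int len;
};

static int ex_map_bufs(struct device *dev, struct ex_buf *buf, int n)
{
        int i;

        for (i = 1; i < n; i++) {       /* slot 0 assumed pre-mapped */
                buf[i].addr = dma_map_single(dev, buf[i].data, buf[i].len,
                                             DMA_TO_DEVICE);
                if (dma_mapping_error(dev, buf[i].addr))
                        goto unmap;
        }
        return 0;

unmap:
        while (--i > 0)         /* stops before the pre-mapped slot 0 */
                dma_unmap_single(dev, buf[i].addr, buf[i].len,
                                 DMA_TO_DEVICE);
        return -ENOMEM;
}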
+
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
@@ -400,7 +474,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
do {
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
- mt76_rx_poll_complete(dev, qid);
+ mt76_rx_poll_complete(dev, qid, napi);
done += cur;
} while (cur && done < budget);
@@ -436,6 +510,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
.add_buf = mt76_dma_add_buf,
+ .tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1dad39697929..27248e24a19b 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -25,6 +25,39 @@
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_RX_RXWI_LEN 32
+
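These GENMASK()/BIT() layouts pair naturally with the linux/bitfield.h helpers; a sketch of building and parsing a TX info word with them (function names hypothetical):

#include <linux/bitfield.h>

static u32 ex_build_txd_info(u16 pkt_len, u8 qsel)
{
        /* FIELD_PREP() shifts each value into its mask's position. */
        return FIELD_PREP(MT_TXD_INFO_LEN, pkt_len) |
               FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
               MT_TXD_INFO_80211;
}

static u16 ex_txd_info_len(u32 info)
{
        /* FIELD_GET() masks and right-shifts the field back out. */
        return FIELD_GET(MT_TXD_INFO_LEN, info);
}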
struct mt76_desc {
__le32 buf0;
__le32 ctrl;
@@ -32,6 +65,16 @@ struct mt76_desc {
__le32 info;
} __packed __aligned(4);
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
int mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index d62e34e7eadf..029d54bce9e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -303,14 +303,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
SET_IEEE80211_DEV(hw, dev->dev);
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->available_antennas_tx = dev->antenna_mask;
@@ -591,15 +583,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue)
+ struct napi_struct *napi)
{
- struct napi_struct *napi = NULL;
struct ieee80211_sta *sta;
struct sk_buff *skb;
- if (queue >= 0)
- napi = &dev->napi[queue];
-
spin_lock(&dev->rx_lock);
while ((skb = __skb_dequeue(frames)) != NULL) {
if (mt76_check_ccmp_pn(skb)) {
@@ -613,7 +601,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
spin_unlock(&dev->rx_lock);
}
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi)
{
struct sk_buff_head frames;
struct sk_buff *skb;
@@ -625,5 +614,6 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
mt76_rx_aggr_reorder(skb, &frames);
}
- mt76_rx_complete(dev, &frames, q);
+ mt76_rx_complete(dev, &frames, napi);
}
+EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 96e9798bb8a0..2eab35879163 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
#include "util.h"
@@ -30,6 +31,7 @@
#define MT_RX_BUF_SIZE 2048
struct mt76_dev;
+struct mt76_wcid;
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
@@ -62,12 +64,22 @@ struct mt76_queue_buf {
int len;
};
+struct mt76u_buf {
+ struct mt76_dev *dev;
+ struct urb *urb;
+ size_t len;
+ bool done;
+};
+
struct mt76_queue_entry {
union {
void *buf;
struct sk_buff *skb;
};
- struct mt76_txwi_cache *txwi;
+ union {
+ struct mt76_txwi_cache *txwi;
+ struct mt76u_buf ubuf;
+ };
bool schedule;
};
@@ -88,6 +100,7 @@ struct mt76_queue {
struct list_head swq;
int swq_queued;
+ u16 first;
u16 head;
u16 tail;
int ndesc;
@@ -110,6 +123,10 @@ struct mt76_queue_ops {
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi);
+ int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more);
@@ -187,9 +204,13 @@ struct mt76_rx_tid {
enum {
MT76_STATE_INITIALIZED,
MT76_STATE_RUNNING,
+ MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_RESET,
MT76_OFFCHANNEL,
+ MT76_REMOVED,
+ MT76_READING_STATS,
+ MT76_MORE_STATS,
};
struct mt76_hw_cap {
@@ -210,6 +231,8 @@ struct mt76_driver_ops {
void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+ bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
+
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -229,6 +252,64 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 0x1,
+ MT_VEND_WRITE = 0x2,
+ MT_VEND_MULTI_WRITE = 0x6,
+ MT_VEND_MULTI_READ = 0x7,
+ MT_VEND_READ_EEPROM = 0x9,
+ MT_VEND_WRITE_FCE = 0x42,
+ MT_VEND_WRITE_CFG = 0x46,
+ MT_VEND_READ_CFG = 0x47,
+};
+
+enum mt76u_in_ep {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt76u_out_ep {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+#define MT_SG_MAX_SIZE 8
+#define MT_NUM_TX_ENTRIES 256
+#define MT_NUM_RX_ENTRIES 128
+#define MCU_RESP_URB_SIZE 1024
+
+struct mt76_usb {
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ struct delayed_work stat_work;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ struct mt76u_mcu {
+ struct mutex mutex;
+ struct completion cmpl;
+ struct mt76u_buf res;
+ u32 msg_seq;
+ } mcu;
+};
+
struct mt76_dev {
struct ieee80211_hw *hw;
struct cfg80211_chan_def chandef;
@@ -271,6 +352,8 @@ struct mt76_dev {
char led_name[32];
bool led_al;
u8 led_pin;
+
+ struct mt76_usb usb;
};
enum mt76_phy_type {
@@ -402,6 +485,14 @@ static inline int mt76_decr(int val, int size)
return (val - 1) & (size - 1);
}
+/* Hardware uses mirrored order of queues with Q3
+ * having the highest priority
+ */
+static inline u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
@@ -421,9 +512,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
return container_of(ptr, struct ieee80211_sta, drv_priv);
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -454,10 +545,69 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
+struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
- int queue);
-void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+ struct napi_struct *napi);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+/* usb */
+static inline bool mt76u_urb_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN &&
+ urb->status != -ENOENT;
+}
+
+/* Map hardware queues to usb endpoints */
+static inline u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
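+/* Scatter-gather is usable only when the host controller supports it
+ * (sg_tablesize > 0) and either imposes no SG constraint or the link
+ * runs at USB wireless speed.
+ */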
+static inline bool mt76u_check_sg(struct mt76_dev *dev)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ return (udev->bus->sg_tablesize > 0 &&
+ (udev->bus->no_sg_constraint ||
+ udev->speed == USB_SPEED_WIRELESS));
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len);
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
+void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp);
+void mt76u_buf_free(struct mt76u_buf *buf);
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+int mt76u_submit_rx_buffers(struct mt76_dev *dev);
+int mt76u_alloc_queues(struct mt76_dev *dev);
+void mt76u_stop_queues(struct mt76_dev *dev);
+void mt76u_stop_stat_wk(struct mt76_dev *dev);
+void mt76u_queues_deinit(struct mt76_dev *dev);
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset);
+void mt76u_mcu_complete_urb(struct urb *urb);
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+void mt76u_mcu_fw_reset(struct mt76_dev *dev);
+int mt76u_mcu_init_rx(struct mt76_dev *dev);
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 000000000000..7843908261ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MT76x0U) += mt76x0.o
+
+mt76x0-objs = \
+ usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
+ mac.o util.o debugfs.o tx.o core.o
+# ccflags-y := -DDEBUG
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
new file mode 100644
index 000000000000..892803fce842
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
+{
+ int i = 100;
+ u32 val;
+
+ do {
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
+ val = mt76_rr(dev, MT_MAC_CSR0);
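+ /* CSR0 reads as 0 while the ASIC is not ready and as ~0 when the
+ * bus returns all-ones (e.g. after removal), so any other value
+ * means the chip is up.
+ */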
+ if (val && ~val)
+ return 0;
+
+ udelay(10);
+ } while (i--);
+
+ return -EIO;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
new file mode 100644
index 000000000000..e7a77a886068
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt76x0_dev *dev = data;
+
+ mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt76x0_dev *dev = data;
+
+ *val = mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i, j;
+
+#define stat_printf(grp, off, name) \
+ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+ stat_printf(rx_stat, 0, rx_crc_err);
+ stat_printf(rx_stat, 1, rx_phy_err);
+ stat_printf(rx_stat, 2, rx_false_cca);
+ stat_printf(rx_stat, 3, rx_plcp_err);
+ stat_printf(rx_stat, 4, rx_fifo_overflow);
+ stat_printf(rx_stat, 5, rx_duplicate);
+
+ stat_printf(tx_stat, 0, tx_fail_cnt);
+ stat_printf(tx_stat, 1, tx_bcn_cnt);
+ stat_printf(tx_stat, 2, tx_success);
+ stat_printf(tx_stat, 3, tx_retransmit);
+ stat_printf(tx_stat, 4, tx_zero_len);
+ stat_printf(tx_stat, 5, tx_underflow);
+
+ stat_printf(aggr_stat, 0, non_aggr_tx);
+ stat_printf(aggr_stat, 1, aggr_tx);
+
+ stat_printf(zero_len_del, 0, tx_zero_len_del);
+ stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+ seq_puts(file, "Aggregations stats:\n");
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%08llx ",
+ dev->stats.aggr_n[i * 8 + j]);
+ seq_putc(file, '\n');
+ }
+
+ seq_printf(file, "recent average AMPDU len: %d\n",
+ atomic_read(&dev->avg_ampdu_len));
+
+ return 0;
+}
+
+static int
+mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt76x0_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt76x0_eeprom_param_read(struct seq_file *file, void *data)
+{
+ struct mt76x0_dev *dev = file->private;
+ int i;
+
+ seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+ seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
+ dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
+ seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
+ dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
+ dev->ee->rssi_offset_5ghz[2]);
+ seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
+ seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
+ seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
+ dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
+ dev->ee->lna_gain_5ghz[2]);
+ seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
+ seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ dev->ee->reg.start + dev->ee->reg.num - 1);
+
+ seq_puts(file, "Per channel power:\n");
+ for (i = 0; i < 58; i++)
+ seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
+ dev->ee->tx_pwr_per_chan[i]);
+
+ seq_puts(file, "Per rate power 2GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_2g[i][0],
+ dev->ee->tx_pwr_cfg_5g[i][1]);
+
+ seq_puts(file, "Per rate power 5GHz:\n");
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "\t %d bw20:%d bw40:%d\n",
+ i, dev->ee->tx_pwr_cfg_5g[i][0],
+ dev->ee->tx_pwr_cfg_5g[i][1]);
+
+ return 0;
+}
+
+static int
+mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+ .open = mt76x0_eeprom_param_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
+ if (!dir)
+ return;
+
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+ &fops_eeprom_param);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
new file mode 100644
index 000000000000..e2efb430419b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+ const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+ unsigned int hdrlen;
+
+ if (unlikely(len < 10))
+ return 0;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (unlikely(hdrlen > len))
+ return 0;
+ return hdrlen;
+}
+
+static struct sk_buff *
+mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ void *data, u32 seg_len, u32 truesize, struct page *p)
+{
+ struct sk_buff *skb;
+ u32 true_len, hdr_len = 0, copy, frag;
+
+ skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
+ if (!true_len || true_len > seg_len)
+ goto bad_frame;
+
+ hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
+ if (!hdr_len)
+ goto bad_frame;
+
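+ /* With L2 padding the hardware inserts 2 bytes between the 802.11
+ * header and the payload; copy the header out and skip the pad.
+ */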
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+
+ data += hdr_len + 2;
+ true_len -= hdr_len;
+ hdr_len = 0;
+ }
+
+ /* If not doing paged RX, the allocated skb will always have enough space */
+ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+ frag = true_len - copy;
+
+ memcpy(skb_put(skb, copy), data, copy);
+ data += copy;
+
+ if (frag) {
+ skb_add_rx_frag(skb, 0, p, data - page_address(p),
+ frag, truesize);
+ get_page(p);
+ }
+
+ return skb;
+
+bad_frame:
+ dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
+ true_len, hdr_len);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
+ u32 seg_len, struct page *p)
+{
+ struct sk_buff *skb;
+ struct mt76x0_rxwi *rxwi;
+ u32 fce_info, truesize = seg_len;
+
+ /* The DMA_INFO field at the beginning of the segment contains only some
+ * of the information; the rest has to be read from the FCE descriptor at
+ * the end of the segment.
+ */
+ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+ seg_len -= MT_FCE_INFO_LEN;
+
+ data += MT_DMA_HDR_LEN;
+ seg_len -= MT_DMA_HDR_LEN;
+
+ rxwi = (struct mt76x0_rxwi *) data;
+ data += sizeof(struct mt76x0_rxwi);
+ seg_len -= sizeof(struct mt76x0_rxwi);
+
+ if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
+ dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
+
+ trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
+
+ skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
+ if (!skb)
+ return;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_rx(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
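+/* RX urbs may aggregate several DMA segments. The first 16 bits of each
+ * DMA header hold the 4-byte-aligned payload length; 0 is returned once
+ * the remaining data cannot hold another complete segment.
+ */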
+static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
+{
+ u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+ sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
+ u16 dma_len = get_unaligned_le16(data);
+
+ if (data_len < min_seg_len ||
+ WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return 0;
+
+ return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
+{
+ u32 seg_len, data_len = e->urb->actual_length;
+ u8 *data = page_address(e->p);
+ struct page *new_p = NULL;
+ int cnt = 0;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ /* Copy the data only if there is very little of it; for larger buffers
+ * hand the current page out as skb fragments and queue a freshly
+ * allocated page in its place.
+ */
+ if (data_len > 512)
+ new_p = dev_alloc_pages(MT_RX_ORDER);
+
+ while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
+ mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
+
+ data_len -= seg_len;
+ data += seg_len;
+ cnt++;
+ }
+
+ if (cnt > 1)
+ trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
+
+ if (new_p) {
+ /* we have one extra ref from the allocator */
+ __free_pages(e->p, MT_RX_ORDER);
+
+ e->p = new_p;
+ }
+}
+
+static struct mt76x0_dma_buf_rx *
+mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
+{
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ struct mt76x0_dma_buf_rx *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (!q->pending)
+ goto out;
+
+ buf = &q->e[q->start];
+ q->pending--;
+ q->start = (q->start + 1) % q->entries;
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+ return buf;
+}
+
+static void mt76x0_complete_rx(struct urb *urb)
+{
+ struct mt76x0_dev *dev = urb->context;
+ struct mt76x0_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+ q->end = (q->end + 1) % q->entries;
+ q->pending++;
+ tasklet_schedule(&dev->rx_tasklet);
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt76x0_rx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct mt76x0_dma_buf_rx *e;
+
+ while ((e = mt76x0_rx_get_pending_entry(dev))) {
+ if (e->urb->status)
+ continue;
+
+ mt76x0_rx_process_entry(dev, e);
+ mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
+ }
+}
+
+static void mt76x0_complete_tx(struct urb *urb)
+{
+ struct mt76x0_tx_queue *q = urb->context;
+ struct mt76x0_dev *dev = q->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (mt76x0_urb_has_error(urb))
+ dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
+ trace_mt76x0_tx_dma_done(&dev->mt76, skb);
+
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+ tasklet_schedule(&dev->tx_tasklet);
+
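+ /* Wake the queue once usage drops an eighth below capacity, giving
+ * some hysteresis against the stop threshold in mt76x0_dma_submit_tx().
+ */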
+ if (q->used == q->entries - q->entries / 8)
+ ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+
+ q->start = (q->start + 1) % q->entries;
+ q->used--;
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void mt76x0_tx_tasklet(unsigned long data)
+{
+ struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
+ struct sk_buff_head skbs;
+ unsigned long flags;
+
+ __skb_queue_head_init(&skbs);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ set_bit(MT76_MORE_STATS, &dev->mt76.state);
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+
+ skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+
+ mt76x0_tx_status(dev, skb);
+ }
+}
+
+static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
+ struct sk_buff *skb, u8 ep)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
+ struct mt76x0_dma_buf_tx *e;
+ struct mt76x0_tx_queue *q = &dev->tx_q[ep];
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (WARN_ON_ONCE(q->entries <= q->used)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ e = &q->e[q->end];
+ e->skb = skb;
+ usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+ mt76x0_complete_tx, q);
+ ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+ if (ret) {
+ /* Special-handle ENODEV from TX urb submission because it will
+ * often be the first ENODEV we see after device is removed.
+ */
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ else
+ dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
+ ret);
+ goto out;
+ }
+
+ q->end = (q->end + 1) % q->entries;
+ q->used++;
+
+ if (q->used >= q->entries)
+ ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return ret;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+ if (ep == 5)
+ return MT_QSEL_MGMT;
+ return MT_QSEL_EDCA;
+}
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q)
+{
+ u8 ep = q2ep(hw_q);
+ u32 dma_flags;
+ int ret;
+
+ dma_flags = MT_TXD_PKT_INFO_80211;
+ if (wcid->hw_key_idx == 0xff)
+ dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+ ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+ if (ret)
+ return ret;
+
+ ret = mt76x0_dma_submit_tx(dev, skb, ep);
+
+ if (ret) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_kill_rx(struct mt76x0_dev *dev)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
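+ /* usb_poison_urb() waits for the urb at the current end to complete,
+ * and the completion handler advances rx_q.end, so each pass of the
+ * loop poisons the next in-flight urb in the ring.
+ */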
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ int next = dev->rx_q.end;
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ usb_poison_urb(dev->rx_q.e[next].urb);
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
+ struct mt76x0_dma_buf_rx *e, gfp_t gfp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ u8 *buf = page_address(e->p);
+ unsigned pipe;
+ int ret;
+
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
+
+ usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+ mt76x0_complete_rx, dev);
+
+ trace_mt76x0_submit_urb(&dev->mt76, e->urb);
+ ret = usb_submit_urb(e->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
+
+ return ret;
+}
+
+static int mt76x0_submit_rx(struct mt76x0_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+ usb_free_urb(dev->rx_q.e[i].urb);
+ }
+}
+
+static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+ dev->rx_q.dev = dev;
+ dev->rx_q.entries = N_RX_ENTRIES;
+
+ for (i = 0; i < N_RX_ENTRIES; i++) {
+ dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+ if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ WARN_ON(q->used);
+
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
+ usb_free_urb(q->e[i].urb);
+ }
+}
+
+static void mt76x0_free_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ mt76x0_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
+ struct mt76x0_tx_queue *q)
+{
+ int i;
+
+ q->dev = dev;
+ q->entries = N_TX_ENTRIES;
+
+ for (i = 0; i < N_TX_ENTRIES; i++) {
+ q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!q->e[i].urb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
+{
+ int i;
+
+ dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
+ sizeof(*dev->tx_q), GFP_KERNEL);
+ if (!dev->tx_q)
+ return -ENOMEM;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt76x0_dma_init(struct mt76x0_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
+ tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
+
+ ret = mt76x0_alloc_tx(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_alloc_rx(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_submit_rx(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt76x0_dma_cleanup(dev);
+ return ret;
+}
+
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
+{
+ mt76x0_kill_rx(dev);
+
+ tasklet_kill(&dev->rx_tasklet);
+
+ mt76x0_free_rx(dev);
+ mt76x0_free_tx(dev);
+
+ tasklet_kill(&dev->tx_tasklet);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
new file mode 100644
index 000000000000..891ce1c3461f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_DMA_H
+#define __MT76X0U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_SEQ GENMASK(19, 16)
+#define MT_TXD_CMD_TYPE GENMASK(26, 20)
+
+enum mt76_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+enum mt76_info_type {
+ DMA_PACKET,
+ DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
+#define MT_TXD_PKT_INFO_80211 BIT(19)
+#define MT_TXD_PKT_INFO_TSO BIT(20)
+#define MT_TXD_PKT_INFO_CSO BIT(21)
+#define MT_TXD_PKT_INFO_WIV BIT(24)
+#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
+ enum mt76_msg_port d_port,
+ enum mt76_info_type type, u32 flags)
+{
+ u32 info;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+
+ info = flags |
+ FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+ FIELD_PREP(MT_TXD_INFO_TYPE, type);
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+ flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
+ return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR BIT(24)
+#define MT_RXD_INFO_QSEL GENMASK(26, 25)
+#define MT_RXD_INFO_PORT GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
+
+enum mt76_evt_type {
+ CMD_DONE,
+ CMD_ERROR,
+ CMD_RETRY,
+ EVENT_PWR_RSP,
+ EVENT_WOW_RSP,
+ EVENT_CARRIER_DETECT_RSP,
+ EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 000000000000..1ecd018f12b8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt76x0.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return val;
+}
+
+static inline int
+sign_extend(u32 val, unsigned int size)
+{
+ bool sign = val & BIT(size - 1);
+
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
+}
+
+static int
+mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
+ enum mt76x0_eeprom_access_modes mode)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
+ MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ /* Parts of the EEPROM not in the usage map (0x80-0xc0, 0xf0)
+ * will not return valid data, but that's OK.
+ */
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+{
+ const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+ u8 data[map_reads * 16];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ for (i = 0; i < map_reads; i++) {
+ ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+ data + i * 16, MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+ dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
+ u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
+ case BOARD_TYPE_5GHZ:
+ dev->ee->has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->ee->has_2ghz = true;
+ break;
+ default:
+ dev->ee->has_2ghz = true;
+ dev->ee->has_5ghz = true;
+ break;
+ }
+
+ dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);
+
+ if (!field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->mt76.dev,
+ "Error: this driver does not support HW RF ctrl\n");
+
+ if (!field_valid(nic_conf0 >> 8))
+ return;
+
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->mt76.dev,
+ "Error: device has more than 1 RX/TX stream!\n");
+
+ dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
+ dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
+}
+
+static int
+mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
+{
+ const void *src = eeprom + MT_EE_MAC_ADDR;
+
+ ether_addr_copy(dev->macaddr, src);
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ return 0;
+}
+
+static void
+mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 temp = eeprom[MT_EE_TEMP_OFFSET];
+
+ if (field_valid(temp))
+ dev->ee->temp_off = sign_extend(temp, 8);
+ else
+ dev->ee->temp_off = -10;
+}
+
+static void
+mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
+ * - comments in rtmp_def.h are incorrect (see rt_channel.c)
+ */
+ static const struct reg_channel_bounds chan_bounds[] = {
+ /* EEPROM country regions 0 - 7 */
+ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
+ { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
+ /* EEPROM country regions 32 - 33 */
+ { 1, 11 }, { 1, 14 }
+ };
+ u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
+ int idx = -1;
+
+ dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
+ if (val < 8)
+ idx = val;
+ if (val > 31 && val < 34)
+ idx = val - 32 + 8;
+
+ if (idx != -1)
+ dev_info(dev->mt76.dev,
+ "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ val, chan_bounds[idx].start,
+ chan_bounds[idx].start + chan_bounds[idx].num - 1);
+ else
+ idx = 5; /* channels 1 - 14 */
+
+ dev->ee->reg = chan_bounds[idx];
+
+ /* TODO: country region 33 is special - phy should be set to B-mode
+ * before entering channel 14 (see sta/connect.c)
+ */
+}
+
+static void
+mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ u8 comp;
+
+ dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+ comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+ if (comp & BIT(7))
+ dev->ee->rf_freq_off -= comp & 0x7f;
+ else
+ dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ s8 gain;
+
+ dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
+ dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[1] = gain;
+
+ gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
+ if (gain == 0xff || gain == 0)
+ dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
+ else
+ dev->ee->lna_gain_5ghz[2] = gain;
+}
+
+static void
+mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
+
+ for (i = 0; i < 2; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+
+ rssi_offset = dev->ee->rssi_offset_5ghz;
+
+ for (i = 0; i < 3; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->mt76.dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+}
+
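+/* Apply the BW40 delta to each of the four s6 rate-power fields packed
+ * one per byte into a 32-bit TX_PWR_CFG word, saturating each field to
+ * the s6 range.
+ */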
+static u32
+calc_bw40_power_rate(u32 value, int delta)
+{
+ u32 ret = 0;
+ int i, tmp;
+
+ for (i = 0; i < 4; i++) {
+ tmp = s6_to_int((value >> i*8) & 0xff) + delta;
+ ret |= (u32)(int_to_s6(tmp)) << i*8;
+ }
+
+ return ret;
+}
+
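+/* The BW40 delta byte uses bit 7 as a valid/enable flag and bit 6 as the
+ * sign; the magnitude in the low bits is clamped to 8.
+ */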
+static s8
+get_delta(u8 val)
+{
+ s8 ret;
+
+ if (!field_valid(val) || !(val & BIT(7)))
+ return 0;
+
+ ret = val & 0x1f;
+ if (ret > 8)
+ ret = 8;
+ if (val & BIT(6))
+ ret = -ret;
+
+ return ret;
+}
+
+static void
+mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ s8 bw40_delta_2g, bw40_delta_5g;
+ u32 val;
+ int i;
+
+ bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+ bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
+
+ for (i = 0; i < 5; i++) {
+ val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+ /* Skip last 16 bits. */
+ if (i == 4)
+ val &= 0x0000ffff;
+
+ dev->ee->tx_pwr_cfg_2g[i][0] = val;
+ dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
+ }
+
+ /* Reading the per-rate TX power for the 5 GHz band is a bit more
+ * complex: we mix 16-bit and 32-bit reads and sometimes shift.
+ */
+ val = get_unaligned_le16(eeprom + 0x120);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[0][0] = val;
+ dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le32(eeprom + 0x122);
+ dev->ee->tx_pwr_cfg_5g[1][0] = val;
+ dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0x126);
+ dev->ee->tx_pwr_cfg_5g[2][0] = val;
+ dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xec);
+ val <<= 16;
+ dev->ee->tx_pwr_cfg_5g[3][0] = val;
+ dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+
+ val = get_unaligned_le16(eeprom + 0xee);
+ dev->ee->tx_pwr_cfg_5g[4][0] = val;
+ dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
+}
+
+static void
+mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
+{
+ int i;
+ u8 tx_pwr;
+
+ for (i = 0; i < 14; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[i] = 5;
+ }
+
+ for (i = 0; i < 40; i++) {
+ tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
+ if (tx_pwr <= 0x3f && tx_pwr > 0)
+ dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
+ else
+ dev->ee->tx_pwr_per_chan[14 + i] = 5;
+ }
+
+ dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
+ dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
+ dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
+ dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
+}
+
+int
+mt76x0_eeprom_init(struct mt76x0_dev *dev)
+{
+ u8 *eeprom;
+ int i, ret;
+
+ ret = mt76x0_efuse_physical_size_check(dev);
+ if (ret)
+ return ret;
+
+ dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
+ if (!dev->ee)
+ return -ENOMEM;
+
+ eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
+ ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+ if (ret)
+ goto out;
+ }
+
+ if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
+ dev_warn(dev->mt76.dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ eeprom[MT_EE_VERSION_EE]);
+ dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+ mt76x0_set_macaddr(dev, eeprom);
+ mt76x0_set_chip_cap(dev, eeprom);
+ mt76x0_set_country_reg(dev, eeprom);
+ mt76x0_set_rf_freq_off(dev, eeprom);
+ mt76x0_set_temp_offset(dev, eeprom);
+ mt76x0_set_lna_gain(dev, eeprom);
+ mt76x0_set_rssi_offset(dev, eeprom);
+ dev->chainmask = 0x0101;
+
+ mt76x0_set_tx_power_per_rate(dev, eeprom);
+ mt76x0_set_tx_power_per_chan(dev, eeprom);
+
+out:
+ kfree(eeprom);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 000000000000..e37b573aed7b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_EEPROM_H
+#define __MT76X0U_EEPROM_H
+
+struct mt76x0_dev;
+
+#define MT76X0U_EE_MAX_VER 0x0c
+#define MT76X0_EEPROM_SIZE 512
+
+#define MT76X0U_DEFAULT_TX_POWER 6
+
+enum mt76_eeprom_field {
+ MT_EE_CHIP_ID = 0x00,
+ MT_EE_VERSION_FAE = 0x02,
+ MT_EE_VERSION_EE = 0x03,
+ MT_EE_MAC_ADDR = 0x04,
+ MT_EE_NIC_CONF_0 = 0x34,
+ MT_EE_NIC_CONF_1 = 0x36,
+ MT_EE_COUNTRY_REGION_5GHZ = 0x38,
+ MT_EE_COUNTRY_REGION_2GHZ = 0x39,
+ MT_EE_FREQ_OFFSET = 0x3a,
+ MT_EE_NIC_CONF_2 = 0x42,
+
+ MT_EE_LNA_GAIN_2GHZ = 0x44,
+ MT_EE_LNA_GAIN_5GHZ_0 = 0x45,
+ MT_EE_RSSI_OFFSET = 0x46,
+ MT_EE_RSSI_OFFSET_5GHZ = 0x4a,
+ MT_EE_LNA_GAIN_5GHZ_1 = 0x49,
+ MT_EE_LNA_GAIN_5GHZ_2 = 0x4d,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x50,
+
+ MT_EE_TX_POWER_OFFSET_2GHZ = 0x52,
+
+ MT_EE_TX_TSSI_SLOPE = 0x6e,
+ MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
+ MT_EE_TX_TSSI_OFFSET = 0x76,
+
+ MT_EE_TX_POWER_OFFSET_5GHZ = 0x78,
+
+ MT_EE_TEMP_OFFSET = 0xd1,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
+ MT_EE_TX_POWER_BYRATE_BASE = 0xde,
+
+ MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
+ (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt76x0_eeprom_access_modes {
+ MT_EE_READ = 0,
+ MT_EE_PHYSICAL_READ = 1,
+};
+
+struct reg_channel_bounds {
+ u8 start;
+ u8 num;
+};
+
+struct mt76x0_eeprom_params {
+ u8 rf_freq_off;
+ s16 temp_off;
+ s8 rssi_offset_2ghz[2];
+ s8 rssi_offset_5ghz[3];
+ s8 lna_gain_2ghz;
+ s8 lna_gain_5ghz[3];
+ u8 pa_type;
+
+ /* TX_PWR_CFG_* values from EEPROM for 20 and 40 MHz bandwidths. */
+ u32 tx_pwr_cfg_2g[5][2];
+ u32 tx_pwr_cfg_5g[5][2];
+
+ u8 tx_pwr_per_chan[58];
+
+ struct reg_channel_bounds reg;
+
+ bool has_2ghz;
+ bool has_5ghz;
+};
+
+int mt76x0_eeprom_init(struct mt76x0_dev *dev);
+
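+/* Helpers for the 6-bit signed (s6) fields used by the per-rate power
+ * words; int_to_s6 saturates to the representable range [-32, 31].
+ */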
+static inline u32 s6_validate(u32 reg)
+{
+ WARN_ON(reg & ~GENMASK(5, 0));
+ return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+ int s6;
+
+ s6 = s6_validate(reg);
+ if (s6 & BIT(5))
+ s6 -= BIT(6);
+
+ return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+ if (val < -0x20)
+ return 0x20;
+ if (val > 0x1f)
+ return 0x1f;
+
+ return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 000000000000..7cdb3e740522
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,720 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+#include "usb.h"
+
+#include "initvals.h"
+
+static void
+mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+{
+ int i;
+
+ /* Note: we don't turn off WLAN_CLK because that makes the device
+ * not respond properly on the probe path.
+ * In case anyone (PSM?) wants to use this function we can
+ * bring the clock stuff back and fix up the probe path.
+ */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ if (!enable)
+ return;
+
+ for (i = 200; i; i--) {
+ val = mt76_rr(dev, MT_CMB_CTRL);
+
+ if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+ break;
+
+ udelay(20);
+ }
+
+ /* Note: the vendor driver tries to disable/enable wlan here and
+ * retry, but the code which does it is so buggy it must never
+ * have triggered, so don't bother.
+ */
+ if (!i)
+ dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
+}
+
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x0_set_wlan_state(dev, val, enable);
+
+ mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_PBF_SYS_CTRL);
+ val &= ~0x2000;
+ mt76_wr(dev, MT_PBF_SYS_CTRL, val);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ msleep(200);
+}
+
+static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+ if (dev->in_max_packet == 512)
+ val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ val = mt76_rr(dev, MT_COM_REG0);
+ if (val & 1)
+ dev_dbg(dev->mt76.dev, "MCU not ready\n");
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+#define RANDOM_WRITE(dev, tab) \
+ mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab))
+
+static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+{
+ int ret, i;
+
+ ret = mt76x0_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ RANDOM_WRITE(dev, mt76x0_bbp_init_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+
+ RANDOM_WRITE(dev, mt76x0_dcoc_tab);
+
+ return 0;
+}
+
+static void
+mt76_init_beacon_offsets(struct mt76x0_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
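+ /* Each MT_BCN_OFFSET register packs four one-byte offsets, expressed
+ * in 64-byte units relative to the beacon memory base.
+ */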
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+{
+ u32 reg;
+
+ RANDOM_WRITE(dev, common_mac_reg_table);
+
+ mt76_init_beacon_offsets(dev);
+
+ /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
+ RANDOM_WRITE(dev, mt76x0_mac_reg_table);
+
+ /* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
+ reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
+ reg &= ~0x3;
+ mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
+
+ if (is_mt7610e(dev)) {
+ /* Disable COEX_EN */
+ reg = mt76_rr(dev, MT_COEXCFG0);
+ reg &= 0xFFFFFFFE;
+ mt76_wr(dev, MT_COEXCFG0, reg);
+ }
+
+ /* Set 0x141C[15:12]=0xF */
+ reg = mt76_rr(dev, MT_EXT_CCA_CFG);
+ reg |= 0x0000F000;
+ mt76_wr(dev, MT_EXT_CCA_CFG, reg);
+
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ /* TxRing 9 is for Mgmt frames.
+ * TxRing 8 is for in-band command frames.
+ * WMM_RG0_TXQMA: this register setting is for FCE to define the
+ * rule of TxRing 9.
+ * WMM_RG1_TXQMA: this register setting is for FCE to define the
+ * rule of TxRing 8.
+ */
+ reg = mt76_rr(dev, MT_WMM_CTRL);
+ reg &= ~0x000003FF;
+ reg |= 0x00000201;
+ mt76_wr(dev, MT_WMM_CTRL, reg);
+
+ /* TODO: Probably not needed */
+ mt76_wr(dev, 0x7028, 0);
+ mt76_wr(dev, 0x7010, 0);
+ mt76_wr(dev, 0x7024, 0);
+ msleep(10);
+}
+
+static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc_array(N_WCIDS * 2, sizeof(*vals), GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS; i++) {
+ vals[i * 2] = 0xffffffff;
+ vals[i * 2 + 1] = 0x00ffffff;
+ }
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
+{
+ u32 vals[4] = {};
+
+ return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+ vals, ARRAY_SIZE(vals));
+}
+
+static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc_array(N_WCIDS * 2, sizeof(*vals), GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS * 2; i++)
+ vals[i] = 1;
+
+ ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+{
+ mt76_rr(dev, MT_RX_STA_CNT0);
+ mt76_rr(dev, MT_RX_STA_CNT1);
+ mt76_rr(dev, MT_RX_STA_CNT2);
+ mt76_rr(dev, MT_TX_STA_CNT0);
+ mt76_rr(dev, MT_TX_STA_CNT1);
+ mt76_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt76x0_mac_start(struct mt76x0_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ return -ETIMEDOUT;
+
+ dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+ MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+ MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+ MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+ MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
+{
+ int i, ok;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
+
+ /* Page count on TxQ */
+ i = 200;
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ ok = 0;
+ i = 200;
+ while (i--) {
+ if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
+ !mt76_rr(dev, 0x0a30) &&
+ !mt76_rr(dev, 0x0a34)) {
+ if (ok++ > 5)
+ break;
+ continue;
+ }
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt76x0_mac_stop(struct mt76x0_dev *dev)
+{
+ mt76x0_mac_stop_hw(dev);
+ flush_delayed_work(&dev->stat_work);
+ cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
+{
+ mt76x0_chip_onoff(dev, false, false);
+}
+
+int mt76x0_init_hardware(struct mt76x0_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+ /* 512 byte per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x0_chip_onoff(dev, true, true);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_mcu_init(dev);
+ if (ret)
+ goto err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Wait for ASIC ready after FW load. */
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ mt76x0_reset_csr_bbp(dev);
+ mt76x0_init_usb_dma(dev);
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
+ mt76_wr(dev, MT_TSO_CTRL, 0x0);
+
+ ret = mt76x0_mcu_cmd_init(dev);
+ if (ret)
+ goto err;
+ ret = mt76x0_dma_init(dev);
+ if (ret)
+ goto err_mcu;
+
+ mt76x0_init_mac_registers(dev);
+
+ if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
+ ret = -EIO;
+ goto err_rx;
+ }
+
+ ret = mt76x0_init_bbp(dev);
+ if (ret)
+ goto err_rx;
+
+ ret = mt76x0_init_wcid_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_key_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt76x0_init_wcid_attr_mem(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+
+ mt76x0_reset_counters(dev);
+
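+	/* MT_US_CYC_CNT holds bus cycles per microsecond; 0x1e (30) presumably matches a 30 MHz reference clock. */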
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+ mt76_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ ret = mt76x0_eeprom_init(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76x0_phy_init(dev);
+ return 0;
+
+err_rx:
+ mt76x0_dma_cleanup(dev);
+err_mcu:
+ mt76x0_mcu_cmd_deinit(dev);
+err:
+ mt76x0_chip_onoff(dev, false, false);
+ return ret;
+}
+
+void mt76x0_cleanup(struct mt76x0_dev *dev)
+{
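+	/* test_and_clear_bit makes teardown run at most once, even if cleanup is entered twice. */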
+ if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return;
+
+ mt76x0_stop_hardware(dev);
+ mt76x0_dma_cleanup(dev);
+ mt76x0_mcu_cmd_deinit(dev);
+}
+
+struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct mt76x0_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->mt76.dev = pdev;
+ dev->mt76.hw = hw;
+ mutex_init(&dev->usb_ctrl_mtx);
+ mutex_init(&dev->reg_atomic_mutex);
+ mutex_init(&dev->hw_atomic_mutex);
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->mt76.lock);
+ spin_lock_init(&dev->mac_lock);
+ spin_lock_init(&dev->con_mon_lock);
+ atomic_set(&dev->avg_ampdu_len, 1);
+ skb_queue_head_init(&dev->tx_skb_done);
+
+ dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
+ if (!dev->stat_wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
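+/* e.g. CHAN2G(1, 2412) expands to { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, .max_power = 30 } */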
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+#define CHAN5G(_idx, _freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+ CHAN5G(36, 5180),
+ CHAN5G(40, 5200),
+ CHAN5G(44, 5220),
+ CHAN5G(46, 5230),
+ CHAN5G(48, 5240),
+ CHAN5G(52, 5260),
+ CHAN5G(56, 5280),
+ CHAN5G(60, 5300),
+ CHAN5G(64, 5320),
+
+ CHAN5G(100, 5500),
+ CHAN5G(104, 5520),
+ CHAN5G(108, 5540),
+ CHAN5G(112, 5560),
+ CHAN5G(116, 5580),
+ CHAN5G(120, 5600),
+ CHAN5G(124, 5620),
+ CHAN5G(128, 5640),
+ CHAN5G(132, 5660),
+ CHAN5G(136, 5680),
+ CHAN5G(140, 5700),
+};
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
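+/* Bitrates are in 100 kbps units (CCK_RATE(2, 55) is 5.5 Mbit/s); the PHY type sits in the upper byte of hw_value, and short-preamble CCK entries are offset by 8 in the low byte. */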
+
+static struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ void *chanlist;
+ int size;
+
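+	/* Duplicate the const channel template into devm memory, since mac80211 may modify channel state (e.g. regulatory flags) at runtime. */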
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
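+	/* Advertise MCS 0-7 (single stream) plus MCS 32; with only TX_DEFINED set, TX follows the RX capabilities. */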
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[4] = 0x1;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
+
+ WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+ ARRAY_SIZE(mt76_channels_2ghz));
+
+ return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
+ mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
+ mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+static int
+mt76_init_sband_5g(struct mt76x0_dev *dev)
+{
+ dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
+
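+	/* Skip the first four entries of mt76_rates: CCK is not valid on 5 GHz, so only the OFDM rates are registered. */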
+ return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
+ mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
+ mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
+}
+
+int mt76x0_register_device(struct mt76x0_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+	/* Reserve WCID 0 for multicast - this way the AP's WCID ends up in
+	 * entry no. 1, as it does in the vendor driver.
+	 */
+ dev->wcid_mask[0] |= 1;
+
+ /* init fake wcid for monitor interfaces */
+ dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
+ GFP_KERNEL);
+ if (!dev->mon_wcid)
+ return -ENOMEM;
+ dev->mon_wcid->idx = 0xff;
+ dev->mon_wcid->hw_key_idx = -1;
+
+ SET_IEEE80211_DEV(hw, dev->mt76.dev);
+
+ hw->queues = 4;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+
+ hw->sta_data_size = sizeof(struct mt76_sta);
+ hw->vif_data_size = sizeof(struct mt76_vif);
+
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ if (dev->ee->has_2ghz) {
+ ret = mt76_init_sband_2g(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ee->has_5ghz) {
+ ret = mt76_init_sband_5g(dev);
+ if (ret)
+ return ret;
+ }
+
+ dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
+ INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ mt76x0_init_debugfs(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 000000000000..24afcfd94b4e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,282 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_INITVALS_H
+#define __MT76X0U_INITVALS_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+#if 1
+ {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
+ {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
+#endif
+
+	{MT_LEGACY_BASIC_RATE,		0x0000013f}, /* Basic rate set bitmap */
+	{MT_HT_BASIC_RATE,		0x00008003}, /* Basic HT rate set, 20M, MCS=3, MM; same format as in the TXWI */
+	{MT_MAC_SYS_CTRL,		0x00}, /* 0x1004, default: disable RX */
+	{MT_RX_FILTR_CFG,		0x17f97}, /* 0x1400, RX filter control */
+	{MT_BKOFF_SLOT_CFG,	0x209}, /* default: short slot time, CC_DELAY_TIME should be 2 */
+ /*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */
+ {MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */
+ {MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */
+ {MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */
+	/*{TX_TIMEOUT_CFG,	0x00182090},	CCK has some problem, so increase the timeout value. 2006-10-09 MArvek RT*/
+	{MT_TX_TIMEOUT_CFG,	0x000a2090}, /* CCK has some problem, so increase the timeout value. 2006-10-09 MArvek RT; modified for 2860E, 2007-08-01 */
+ {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
+ {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/
+
+ {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/},
+ {MT_PBF_RX_MAX_PCNT, 0x9f},
+
+ /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/
+/* WMM_ACM_SUPPORT */
+/* {TX_RTY_CFG, 0x6bb80101}, sample*/
+	{MT_TX_RETRY_CFG,	0x47d01f0f}, /* Jan, 2006/11/16: set TxWI->ACK = 0 in Probe Rsp; modified for 2860E, 2007-08-03 */
+
+	{MT_AUTO_RSP_CFG,	0x00000013}, /* Initialize the auto-responder, since QA tests turn it off */
+	{MT_CCK_PROT_CFG,	0x05740003 /*0x01740003*/}, /* Initialize the auto-responder (QA tests turn it off); RTS threshold enabled */
+	{MT_OFDM_PROT_CFG,	0x05740003 /*0x01740003*/}, /* Initialize the auto-responder (QA tests turn it off); RTS threshold enabled */
+	{MT_PBF_CFG,		0xf40006}, /* Only enable queue 2 */
+	{MT_MM40_PROT_CFG,	0x3F44084}, /* Initialize the auto-responder, since QA tests turn it off */
+ {MT_WPDMA_GLO_CFG, 0x00000030},
+ {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/
+ {MT_GF40_PROT_CFG, 0x03F44084},
+ {MT_MM20_PROT_CFG, 0x01744004},
+	{MT_TXOP_CTRL_CFG,	0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /* Extension channel backoff */
+ {MT_TX_RTS_CFG, 0x00092b20},
+
+ {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */
+ {MT_TXOP_HLDR_ET, 0x00000002},
+
+	/* Jerry's comment, 2008/01/16: we use SIFS = 10us for CCK by default, but
+	 * 10us seems too small for the Intel 2200bg card. In MBSS mode the delta
+	 * time between beacon0 and beacon1 is SIFS (10us), so if an Intel 2200bg
+	 * card connects to BSS0, pings are always lost. So we change the CCK SIFS
+	 * from 10us to 16us. */
+ {MT_XIFS_TIME_CFG, 0x33a41010},
+ {MT_PWR_PIN_CFG, 0x00000000},
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+ /* {MT_IOCFG_6, 0xA0040080 }, */
+ {MT_PBF_SYS_CTRL, 0x00080c00 },
+ {MT_PBF_CFG, 0x77723c1f },
+ {MT_FCE_PSE_CTRL, 0x00000001 },
+
+ {MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 },
+
+ /* Delay bb_tx_pe for proper tx_mcs_pwr update */
+ {MT_TX_SW_CFG0, 0x00000601 },
+
+	/* Set rf_tx_pe deassert time to 1us, per Chee's comment in MT7650_CR_setting_1018.xlsx */
+ {MT_TX_SW_CFG1, 0x00040000 },
+ {MT_TX_SW_CFG2, 0x00000000 },
+
+ /* disable Tx info report */
+ {0xa44, 0x0000000 },
+
+ {MT_HEADER_TRANS_CTRL_REG, 0x0},
+ {MT_TSO_CTRL, 0x0},
+
+ /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
+ {MT_BB_PA_MODE_CFG1, 0x00500055},
+
+ /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
+ {MT_RF_PA_MODE_CFG1, 0x00500055},
+
+ {MT_TX_ALC_CFG_0, 0x2F2F000C},
+ {MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */
+
+ {MT_TX_PWR_CFG_0, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_1, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_2, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_3, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_4, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_7, 0x3A3A3A3A},
+ {MT_TX_PWR_CFG_8, 0x3A},
+ {MT_TX_PWR_CFG_9, 0x3A},
+	/* Enable Tx length > 4095 bytes */
+ {0x150C, 0x00000002},
+
+ /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
+ {0x1238, 0x001700C8},
+ /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
+ /* {MT_LDO_CTRL_0, 0x00A647B6}, */
+
+ /* Default LDO_DIG supply 1.26V, change to 1.2V */
+ {MT_LDO_CTRL_1, 0x6B006464 },
+/*
+ {MT_HT_BASIC_RATE, 0x00004003 },
+ {MT_HT_CTRL_CFG, 0x000001FF },
+*/
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+ {MT_BBP(CORE, 1), 0x00000002},
+ {MT_BBP(CORE, 4), 0x00000000},
+ {MT_BBP(CORE, 24), 0x00000000},
+ {MT_BBP(CORE, 32), 0x4003000a},
+ {MT_BBP(CORE, 42), 0x00000000},
+ {MT_BBP(CORE, 44), 0x00000000},
+
+ {MT_BBP(IBI, 11), 0x00000080},
+
+	/*
+	 * 0x2300[5] Default Antenna:
+	 * 0 for WIFI main antenna
+	 * 1 for WIFI aux antenna
+	 */
+ {MT_BBP(AGC, 0), 0x00021400},
+ {MT_BBP(AGC, 1), 0x00000003},
+ {MT_BBP(AGC, 2), 0x003A6464},
+ {MT_BBP(AGC, 15), 0x88A28CB8},
+ {MT_BBP(AGC, 22), 0x00001E21},
+ {MT_BBP(AGC, 23), 0x0000272C},
+ {MT_BBP(AGC, 24), 0x00002F3A},
+ {MT_BBP(AGC, 25), 0x8000005A},
+ {MT_BBP(AGC, 26), 0x007C2005},
+ {MT_BBP(AGC, 34), 0x000A0C0C},
+ {MT_BBP(AGC, 37), 0x2121262C},
+ {MT_BBP(AGC, 41), 0x38383E45},
+ {MT_BBP(AGC, 57), 0x00001010},
+ {MT_BBP(AGC, 59), 0xBAA20E96},
+ {MT_BBP(AGC, 63), 0x00000001},
+
+ {MT_BBP(TXC, 0), 0x00280403},
+ {MT_BBP(TXC, 1), 0x00000000},
+
+ {MT_BBP(RXC, 1), 0x00000012},
+ {MT_BBP(RXC, 2), 0x00000011},
+ {MT_BBP(RXC, 3), 0x00000005},
+ {MT_BBP(RXC, 4), 0x00000000},
+ {MT_BBP(RXC, 5), 0xF977C4EC},
+ {MT_BBP(RXC, 7), 0x00000090},
+
+ {MT_BBP(TXO, 8), 0x00000000},
+
+ {MT_BBP(TXBE, 0), 0x00000000},
+ {MT_BBP(TXBE, 4), 0x00000004},
+ {MT_BBP(TXBE, 6), 0x00000000},
+ {MT_BBP(TXBE, 8), 0x00000014},
+ {MT_BBP(TXBE, 9), 0x20000000},
+ {MT_BBP(TXBE, 10), 0x00000000},
+ {MT_BBP(TXBE, 12), 0x00000000},
+ {MT_BBP(TXBE, 13), 0x00000000},
+ {MT_BBP(TXBE, 14), 0x00000000},
+ {MT_BBP(TXBE, 15), 0x00000000},
+ {MT_BBP(TXBE, 16), 0x00000000},
+ {MT_BBP(TXBE, 17), 0x00000000},
+
+ {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
+ {MT_BBP(RXFE, 3), 0x00000000},
+ {MT_BBP(RXFE, 4), 0x00000000},
+
+ {MT_BBP(RXO, 13), 0x00000092},
+ {MT_BBP(RXO, 14), 0x00060612},
+ {MT_BBP(RXO, 15), 0xC8321B18},
+ {MT_BBP(RXO, 16), 0x0000001E},
+ {MT_BBP(RXO, 17), 0x00000000},
+ {MT_BBP(RXO, 18), 0xCC00A993},
+ {MT_BBP(RXO, 19), 0xB9CB9CB9},
+ {MT_BBP(RXO, 20), 0x26c00057},
+ {MT_BBP(RXO, 21), 0x00000001},
+ {MT_BBP(RXO, 24), 0x00000006},
+};
+
+static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
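+	/* Each entry is written only when the current band/bandwidth matches its RF_* flag mask. */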
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}},
+ {RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}},
+ {RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}},
+ {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}},
+
+ {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}},
+ {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXFE, 0), 0x895000E0}},
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+ {MT_BBP(CAL, 47), 0x000010F0 },
+ {MT_BBP(CAL, 48), 0x00008080 },
+ {MT_BBP(CAL, 49), 0x00000F07 },
+ {MT_BBP(CAL, 50), 0x00000040 },
+ {MT_BBP(CAL, 51), 0x00000404 },
+ {MT_BBP(CAL, 52), 0x00080803 },
+ {MT_BBP(CAL, 53), 0x00000704 },
+ {MT_BBP(CAL, 54), 0x00002828 },
+ {MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 000000000000..95d43efc1f3d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,772 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_PHY_INITVALS_H
+#define __MT76X0U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value) \
+ { (bank) << 16 | (reg), value }
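+/* The RF bank index is packed into the upper 16 bits of the register address. */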
+
+static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
+/*
+ Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC
+*/
+ { MT_RF(0, 1), 0x01},
+ { MT_RF(0, 2), 0x11},
+
+ /*
+ R3 ~ R7: VCO Cal.
+ */
+ { MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */
+ { MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */
+ { MT_RF(0, 5), 0x00},
+ { MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */
+ { MT_RF(0, 7), 0x00},
+
+ /*
+ XO
+ */
+ { MT_RF(0, 8), 0x00},
+ { MT_RF(0, 9), 0x00},
+ { MT_RF(0, 10), 0x0C},
+ { MT_RF(0, 11), 0x00},
+ { MT_RF(0, 12), 0x00},
+
+ /*
+ BG
+ */
+ { MT_RF(0, 13), 0x00},
+ { MT_RF(0, 14), 0x00},
+ { MT_RF(0, 15), 0x00},
+
+ /*
+ LDO
+ */
+ { MT_RF(0, 19), 0x20},
+ /*
+ XO
+ */
+ { MT_RF(0, 20), 0x22},
+ { MT_RF(0, 21), 0x12},
+ { MT_RF(0, 23), 0x00},
+ { MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */
+ { MT_RF(0, 25), 0x00},
+
+ /*
+ PLL, See Freq Selection
+ */
+ { MT_RF(0, 26), 0x00},
+ { MT_RF(0, 27), 0x00},
+ { MT_RF(0, 28), 0x00},
+ { MT_RF(0, 29), 0x00},
+ { MT_RF(0, 30), 0x00},
+ { MT_RF(0, 31), 0x00},
+ { MT_RF(0, 32), 0x00},
+ { MT_RF(0, 33), 0x00},
+ { MT_RF(0, 34), 0x00},
+ { MT_RF(0, 35), 0x00},
+ { MT_RF(0, 36), 0x00},
+ { MT_RF(0, 37), 0x00},
+
+ /*
+ LO Buffer
+ */
+ { MT_RF(0, 38), 0x2F},
+
+ /*
+ Test Ports
+ */
+ { MT_RF(0, 64), 0x00},
+ { MT_RF(0, 65), 0x80},
+ { MT_RF(0, 66), 0x01},
+ { MT_RF(0, 67), 0x04},
+
+ /*
+ ADC/DAC
+ */
+ { MT_RF(0, 68), 0x00},
+ { MT_RF(0, 69), 0x08},
+ { MT_RF(0, 70), 0x08},
+ { MT_RF(0, 71), 0x40},
+ { MT_RF(0, 72), 0xD0},
+ { MT_RF(0, 73), 0x93},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
+/*
+ Bank 5 - Channel 0 2G RF registers
+*/
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBand6590 */
+
+ { MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */
+ { MT_RF(5, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(5, 4), 0x00},
+ { MT_RF(5, 5), 0x84},
+ { MT_RF(5, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(5, 7), 0x00},
+ { MT_RF(5, 8), 0x00},
+ { MT_RF(5, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(5, 10), 0x51},
+ { MT_RF(5, 11), 0x22},
+ { MT_RF(5, 12), 0x22},
+ { MT_RF(5, 13), 0x0F},
+ { MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */
+ { MT_RF(5, 15), 0x25},
+ { MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */
+ { MT_RF(5, 17), 0x00},
+ { MT_RF(5, 18), 0x00},
+ { MT_RF(5, 19), 0x30}, /* Improve max Pin */
+ { MT_RF(5, 20), 0x33},
+ { MT_RF(5, 21), 0x02},
+ { MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */
+ { MT_RF(5, 23), 0x00},
+ { MT_RF(5, 24), 0x25},
+ { MT_RF(5, 26), 0x00},
+ { MT_RF(5, 27), 0x12},
+ { MT_RF(5, 28), 0x0F},
+ { MT_RF(5, 29), 0x00},
+
+ /*
+ LOGEN
+ */
+ { MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */
+ { MT_RF(5, 31), 0x35},
+ { MT_RF(5, 32), 0x31},
+ { MT_RF(5, 33), 0x31},
+ { MT_RF(5, 34), 0x34},
+ { MT_RF(5, 35), 0x03},
+ { MT_RF(5, 36), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */
+ { MT_RF(5, 38), 0xB3},
+ { MT_RF(5, 39), 0x33},
+ { MT_RF(5, 40), 0xB1},
+ { MT_RF(5, 41), 0x71},
+ { MT_RF(5, 42), 0xF2},
+ { MT_RF(5, 43), 0x47},
+ { MT_RF(5, 44), 0x77},
+ { MT_RF(5, 45), 0x0E},
+ { MT_RF(5, 46), 0x10},
+ { MT_RF(5, 47), 0x00},
+ { MT_RF(5, 48), 0x53},
+ { MT_RF(5, 49), 0x03},
+ { MT_RF(5, 50), 0xEF},
+ { MT_RF(5, 51), 0xC7},
+ { MT_RF(5, 52), 0x62},
+ { MT_RF(5, 53), 0x62},
+ { MT_RF(5, 54), 0x00},
+ { MT_RF(5, 55), 0x00},
+ { MT_RF(5, 56), 0x0F},
+ { MT_RF(5, 57), 0x0F},
+ { MT_RF(5, 58), 0x16},
+ { MT_RF(5, 59), 0x16},
+ { MT_RF(5, 60), 0x10},
+ { MT_RF(5, 61), 0x10},
+ { MT_RF(5, 62), 0xD0},
+ { MT_RF(5, 63), 0x6C},
+ { MT_RF(5, 64), 0x58},
+ { MT_RF(5, 65), 0x58},
+ { MT_RF(5, 66), 0xF2},
+ { MT_RF(5, 67), 0xE8},
+ { MT_RF(5, 68), 0xF0},
+ { MT_RF(5, 69), 0xF0},
+ { MT_RF(5, 127), 0x04},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
+/*
+ Bank 6 - Channel 0 5G RF registers
+*/
+ /*
+ RX logic operation
+ */
+ /* RF_R00 Change in SelectBandmt76x0 */
+
+ { MT_RF(6, 2), 0x0C},
+ { MT_RF(6, 3), 0x00},
+
+ /*
+ TX logic operation
+ */
+ { MT_RF(6, 4), 0x00},
+ { MT_RF(6, 5), 0x84},
+ { MT_RF(6, 6), 0x02},
+
+ /*
+ LDO
+ */
+ { MT_RF(6, 7), 0x00},
+ { MT_RF(6, 8), 0x00},
+ { MT_RF(6, 9), 0x00},
+
+ /*
+ RX
+ */
+ { MT_RF(6, 10), 0x00},
+ { MT_RF(6, 11), 0x01},
+
+ { MT_RF(6, 13), 0x23},
+ { MT_RF(6, 14), 0x00},
+ { MT_RF(6, 15), 0x04},
+ { MT_RF(6, 16), 0x22},
+
+ { MT_RF(6, 18), 0x08},
+ { MT_RF(6, 19), 0x00},
+ { MT_RF(6, 20), 0x00},
+ { MT_RF(6, 21), 0x00},
+ { MT_RF(6, 22), 0xFB},
+
+ /*
+ LOGEN5G
+ */
+ { MT_RF(6, 25), 0x76},
+ { MT_RF(6, 26), 0x24},
+ { MT_RF(6, 27), 0x04},
+ { MT_RF(6, 28), 0x00},
+ { MT_RF(6, 29), 0x00},
+
+ /*
+ TX
+ */
+ { MT_RF(6, 37), 0xBB},
+ { MT_RF(6, 38), 0xB3},
+
+ { MT_RF(6, 40), 0x33},
+ { MT_RF(6, 41), 0x33},
+
+ { MT_RF(6, 43), 0x03},
+ { MT_RF(6, 44), 0xB3},
+
+ { MT_RF(6, 46), 0x17},
+ { MT_RF(6, 47), 0x0E},
+ { MT_RF(6, 48), 0x10},
+ { MT_RF(6, 49), 0x07},
+
+ { MT_RF(6, 62), 0x00},
+ { MT_RF(6, 63), 0x00},
+ { MT_RF(6, 64), 0xF1},
+ { MT_RF(6, 65), 0x0F},
+};
+
+static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
+/*
+ Bank 7 - Channel 0 VGA RF registers
+*/
+ /* E3 CR */
+ { MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */
+ { MT_RF(7, 1), 0x00},
+ { MT_RF(7, 2), 0x00},
+ { MT_RF(7, 3), 0x00},
+ { MT_RF(7, 4), 0x00},
+
+ { MT_RF(7, 10), 0x13},
+ { MT_RF(7, 11), 0x0F},
+ { MT_RF(7, 12), 0x13}, /* For dcoc */
+ { MT_RF(7, 13), 0x13}, /* For dcoc */
+ { MT_RF(7, 14), 0x13}, /* For dcoc */
+ { MT_RF(7, 15), 0x20}, /* For dcoc */
+ { MT_RF(7, 16), 0x22}, /* For dcoc */
+
+ { MT_RF(7, 17), 0x7C},
+
+ { MT_RF(7, 18), 0x00},
+ { MT_RF(7, 19), 0x00},
+ { MT_RF(7, 20), 0x00},
+ { MT_RF(7, 21), 0xF1},
+ { MT_RF(7, 22), 0x11},
+ { MT_RF(7, 23), 0xC2},
+ { MT_RF(7, 24), 0x41},
+ { MT_RF(7, 25), 0x20},
+ { MT_RF(7, 26), 0x40},
+ { MT_RF(7, 27), 0xD7},
+ { MT_RF(7, 28), 0xA2},
+ { MT_RF(7, 29), 0x60},
+ { MT_RF(7, 30), 0x49},
+ { MT_RF(7, 31), 0x20},
+ { MT_RF(7, 32), 0x44},
+ { MT_RF(7, 33), 0xC1},
+ { MT_RF(7, 34), 0x60},
+ { MT_RF(7, 35), 0xC0},
+
+ { MT_RF(7, 61), 0x01},
+
+ { MT_RF(7, 72), 0x3C},
+ { MT_RF(7, 73), 0x34},
+ { MT_RF(7, 74), 0x00},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00},
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20},
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01},
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00},
+
+ /* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40},
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10},
+
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA},
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA},
+
+ { MT_RF(7, 76), RF_BW_20, 0x40},
+ { MT_RF(7, 76), RF_BW_40, 0x40},
+ { MT_RF(7, 76), RF_BW_80, 0x10},
+
+ { MT_RF(7, 77), RF_BW_20, 0x40},
+ { MT_RF(7, 77), RF_BW_40, 0x40},
+ { MT_RF(7, 77), RF_BW_80, 0x10},
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
+ /* Bank, Register, Bw/Band, Value */
+ { MT_RF(0, 16), RF_G_BAND, 0x20},
+ { MT_RF(0, 16), RF_A_BAND, 0x20},
+
+ { MT_RF(0, 18), RF_G_BAND, 0x00},
+ { MT_RF(0, 18), RF_A_BAND, 0x00},
+
+ { MT_RF(0, 39), RF_G_BAND, 0x36},
+ { MT_RF(0, 39), RF_A_BAND_LB, 0x34},
+ { MT_RF(0, 39), RF_A_BAND_MB, 0x33},
+ { MT_RF(0, 39), RF_A_BAND_HB, 0x31},
+ { MT_RF(0, 39), RF_A_BAND_11J, 0x36},
+
+ { MT_RF(6, 12), RF_A_BAND_LB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_MB, 0x44},
+ { MT_RF(6, 12), RF_A_BAND_HB, 0x55},
+ { MT_RF(6, 12), RF_A_BAND_11J, 0x44},
+
+ { MT_RF(6, 17), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 17), RF_A_BAND_MB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_HB, 0x00},
+ { MT_RF(6, 17), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 24), RF_A_BAND_LB, 0xA1},
+ { MT_RF(6, 24), RF_A_BAND_MB, 0x41},
+ { MT_RF(6, 24), RF_A_BAND_HB, 0x21},
+ { MT_RF(6, 24), RF_A_BAND_11J, 0xE1},
+
+ { MT_RF(6, 39), RF_A_BAND_LB, 0x36},
+ { MT_RF(6, 39), RF_A_BAND_MB, 0x34},
+ { MT_RF(6, 39), RF_A_BAND_HB, 0x32},
+ { MT_RF(6, 39), RF_A_BAND_11J, 0x37},
+
+ { MT_RF(6, 42), RF_A_BAND_LB, 0xFB},
+ { MT_RF(6, 42), RF_A_BAND_MB, 0xF3},
+ { MT_RF(6, 42), RF_A_BAND_HB, 0xEB},
+ { MT_RF(6, 42), RF_A_BAND_11J, 0xEB},
+
+ /* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */
+
+ { MT_RF(6, 127), RF_G_BAND, 0x84},
+ { MT_RF(6, 127), RF_A_BAND, 0x04},
+
+ { MT_RF(7, 5), RF_G_BAND, 0x40},
+ { MT_RF(7, 5), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 9), RF_G_BAND, 0x00},
+ { MT_RF(7, 9), RF_A_BAND, 0x00},
+
+ { MT_RF(7, 70), RF_G_BAND, 0x00},
+ { MT_RF(7, 70), RF_A_BAND, 0x6D},
+
+ { MT_RF(7, 71), RF_G_BAND, 0x00},
+ { MT_RF(7, 71), RF_A_BAND, 0xB0},
+
+ { MT_RF(7, 78), RF_G_BAND, 0x00},
+ { MT_RF(7, 78), RF_A_BAND, 0x55},
+
+ { MT_RF(7, 79), RF_G_BAND, 0x00},
+ { MT_RF(7, 79), RF_A_BAND, 0x55},
+};
+
+static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */
+};
+
+static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
+ {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */
+ {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */
+ {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */
+ {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */
+ {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */
+ {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */
+ {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */
+ {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */
+ {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */
+ {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */
+ {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */
+ {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */
+ {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */
+ {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */
+
+ {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */
+ {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */
+ {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */
+ {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */
+ {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */
+ {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */
+ {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */
+ {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */
+
+ {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */
+ {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */
+ {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */
+ {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */
+ {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */
+ {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */
+ {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */
+ {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */
+ {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */
+ {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */
+ {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */
+ {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */
+ {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */
+ {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */
+ {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */
+ {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */
+ {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */
+ {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */
+ {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */
+ {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */
+ {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */
+ {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */
+ {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */
+ {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */
+ {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */
+ {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */
+ {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */
+ {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */
+ {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */
+
+ {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */
+ {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */
+ {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */
+ {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */
+ {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */
+ {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */
+ {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */
+ {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */
+ {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */
+ {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */
+ {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */
+ {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */
+ {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */
+ {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */
+ {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */
+ {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */
+ {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */
+ {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */
+ {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */
+ {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */
+ {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */
+ {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */
+ {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */
+ {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */
+ {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */
+ {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */
+ {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */
+ {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */
+ {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */
+ {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */
+ {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */
+ {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */
+ {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */
+ {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */
+ {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */
+ {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */
+ {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */
+
+ {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */
+ {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */
+ {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */
+ {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */
+ {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */
+ {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */
+ {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */
+ {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */
+ {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */
+ {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */
+ {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */
+ {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */
+ {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */
+ {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */
+ {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */
+ {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */
+ {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */
+ {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */
+ {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */
+ {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */
+ {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */
+ {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */
+ {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */
+ {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */
+ {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */
+ {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */
+ {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */
+ {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */
+ {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */
+ {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */
+ {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */
+ {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */
+ {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */
+ {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */
+ {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */
+ {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */
+ {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */
+};
+
+static const u8 mt76x0_sdm_channel[] = {
+ 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165
+};
+
+static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
+ { MT_RF(6, 45), RF_A_BAND_LB, 0x63},
+ { MT_RF(6, 45), RF_A_BAND_MB, 0x43},
+ { MT_RF(6, 45), RF_A_BAND_HB, 0x33},
+ { MT_RF(6, 45), RF_A_BAND_11J, 0x73},
+
+ { MT_RF(6, 50), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 50), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 51), RF_A_BAND_LB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_MB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 51), RF_A_BAND_11J, 0x02},
+
+ { MT_RF(6, 52), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 52), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 53), RF_A_BAND_LB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_MB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_HB, 0x08},
+ { MT_RF(6, 53), RF_A_BAND_11J, 0x08},
+
+ { MT_RF(6, 54), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 54), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 55), RF_A_BAND_LB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_MB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_HB, 0x0A},
+ { MT_RF(6, 55), RF_A_BAND_11J, 0x0A},
+
+ { MT_RF(6, 56), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 56), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 57), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_MB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_HB, 0x05},
+ { MT_RF(6, 57), RF_A_BAND_11J, 0x05},
+
+ { MT_RF(6, 58), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 58), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 58), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 58), RF_A_BAND_11J, 0x07},
+
+ { MT_RF(6, 59), RF_A_BAND_LB, 0x05},
+ { MT_RF(6, 59), RF_A_BAND_MB, 0x03},
+ { MT_RF(6, 59), RF_A_BAND_HB, 0x02},
+ { MT_RF(6, 59), RF_A_BAND_11J, 0x07},
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
new file mode 100644
index 000000000000..95f28492a843
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+static void
+mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
+ struct mt76_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
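+	/* The hardware reports only the final rate and the total retry
+	 * count; reconstruct the earlier attempts assuming one rate-index
+	 * step down per retry.
+	 */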
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+	if (last_rate > 0)
+		rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
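+	/* mac80211 packs the stream count into the rate index: a VHT index
+	 * is mcs | (nss - 1) << 4 while HT uses 8 MCS values per stream,
+	 * hence the shifts below.
+	 */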
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+}
+
+struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
+{
+ struct mt76_tx_status stat = {};
+ u32 stat2, stat1;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return stat;
+}
+
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+		void *priv;
+
+		priv = msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ spin_lock_bh(&dev->mac_lock);
+ ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
+ spin_unlock_bh(&dev->mac_lock);
+out:
+ rcu_read_unlock();
+}
+
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ bool ht_rts[4] = {};
+ int i;
+
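+	/* prot[0..5] end up in six consecutive registers starting at
+	 * MT_CCK_PROT_CFG (see the final loop): CCK, OFDM and then the
+	 * 20/40 MHz mixed-mode and greenfield HT configs, which is what
+	 * ht_rts[0..3] index (mapping assumed from the register layout).
+	 */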
+ prot[0] = MT_PROT_NAV_SHORT |
+ MT_PROT_TXOP_ALLOW_ALL |
+ MT_PROT_RTS_THR_EN;
+ prot[1] = prot[0];
+ if (legacy_prot)
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+ prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+ if (legacy_prot) {
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+ } else {
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ ht_rts[1] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+ }
+
+ if (non_gf)
+ ht_rts[2] = ht_rts[3] = true;
+
+ for (i = 0; i < 4; i++)
+ if (ht_rts[i])
+ prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+ for (i = 0; i < 6; i++)
+ mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+{
+ if (short_preamb)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+{
+ u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
+
+ val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ if (!enable) {
+ mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+ return;
+ }
+
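+	/* The interval field appears to be in units of 1/16 TU, hence the
+	 * << 4 applied to an interval given in TU.
+	 */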
+ val &= ~MT_BEACON_TIME_CFG_INTVAL;
+ val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+	       MT_BEACON_TIME_CFG_TBTT_EN;
+
+	mt76_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+{
+ u32 val = mt76_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt76x0_mac_work(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ mac_work.work);
+ struct {
+ u32 addr_base;
+ u32 span;
+ u64 *stat_base;
+ } spans[] = {
+ { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
+ { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
+ { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
+ { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
+ };
+ u32 sum, n;
+ int i, j, k;
+
+	/* Note: using MCU_RANDOM_READ is actually slower than reading all the
+	 * registers by hand. The MCU takes ca. 20ms to complete a read of 24
+	 * registers while reading them one by one takes roughly
+	 * 24 * 200us =~ 5ms.
+	 */
+
+ k = 0;
+ n = 0;
+ sum = 0;
+ for (i = 0; i < ARRAY_SIZE(spans); i++)
+ for (j = 0; j < spans[i].span; j++) {
+ u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
+
+ spans[i].stat_base[j * 2] += val & 0xffff;
+ spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+ /* Calculate average AMPDU length */
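+			/* Each TX_AGG_CNT register apparently packs two 16-bit
+			 * counters, AMPDUs of length 2k + 1 in the low word and
+			 * 2k + 2 in the high word, which is what the weights
+			 * in the sum below assume.
+			 */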
+ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+ spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+ continue;
+
+ n += (val >> 16) + (val & 0xffff);
+ sum += (val & 0xffff) * (1 + k * 2) +
+ (val >> 16) * (2 + k * 2);
+ k++;
+ }
+
+ atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+ mt76x0_check_mac_err(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ u8 zmac[ETH_ALEN] = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (mac)
+ memcpy(zmac, mac, sizeof(zmac));
+
+ mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+{
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ void *msta;
+ u8 min_factor = 3;
+ int i;
+
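+	/* Note: the early return below makes this function a no-op; the
+	 * minimum ampdu_factor scan that follows is effectively disabled.
+	 */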
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+ min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ }
+ rcu_read_unlock();
+
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+ FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
+ u16 rate, int rssi)
+{
+ dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
+ dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
+}
+
+static int
+mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt76x0_rxwi *rxwi = rxi;
+ u32 len, ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ int rssi;
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ if (WARN_ON(len < 10))
+ return 0;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+ }
+
+ status->chains = BIT(0);
+ rssi = mt76x0_phy_get_rssi(dev, rxwi);
+ status->chain_signal[0] = status->signal = rssi;
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ mt76_mac_process_rate(status, rate);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ if (mt76x0_rx_is_our_beacon(dev, data)) {
+ mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
+ } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
+ if (dev->avg_rssi == 0)
+ dev->avg_rssi = rssi;
+ else
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
+	}
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ return len;
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_key(&dev->mt76, idx);
+
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
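+		/* Byte 3 of the IV carries the key index in bits 6..7; for
+		 * TKIP/CCMP the Extended IV bit (0x20) is set as well.
+		 */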
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP) {
+ /* Note: start with 1 to comply with spec,
+ * (see comment on common/cmm_wpa.c:4291).
+ */
+ iv_data[0] |= 1;
+ iv_data[3] |= 0x20;
+ }
+ }
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ val = mt76_rr(dev, MT_WCID_ATTR(idx));
+ val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+ val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val &= ~MT_WCID_ATTR_PAIRWISE;
+ val |= MT_WCID_ATTR_PAIRWISE *
+ !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ mt76_wr(dev, MT_WCID_ATTR(idx), val);
+
+ return 0;
+}
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+ key_data, sizeof(key_data));
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
new file mode 100644
index 000000000000..bea067b71c13
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+/* Note: the values in the original "RSSI" and "SNR" fields are not actually
+ * what those names suggest on MT76X0U; the names used by this driver are
+ * educated guesses (see vendor mac/ral_omac.c).
+ */
+struct mt76x0_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ s8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_U2M BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211 BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
+#define MT_RXWI_CTL_TID GENMASK(31, 28)
+
+#define MT_RXWI_FRAG GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC BIT(6)
+#define MT_RXWI_RATE_BW GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI BIT(9)
+#define MT_RXWI_RATE_STBC BIT(10)
+#define MT_RXWI_RATE_LDPC_ETXBF BIT(11)
+#define MT_RXWI_RATE_SND BIT(12)
+#define MT_RXWI_RATE_PHY GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+
+#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
+
+enum mt76_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+};
+
+struct mt76_txwi {
+ __le16 flags;
+ __le16 rate_ctl;
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+ __le32 iv;
+ __le32 eiv;
+ u8 aid;
+ u8 txstream;
+ u8 ctl2;
+ u8 pktid;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_RATE_MCS GENMASK(6, 0)
+#define MT_TXWI_RATE_BW BIT(7)
+#define MT_TXWI_RATE_SGI BIT(8)
+#define MT_TXWI_RATE_STBC GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
+#define MT_TXWI_CTL_PIFS_REV BIT(6)
+
+#define MT_TXWI_PKTID_PROBE BIT(7)
+
+u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi);
+int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+
+int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
+void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 000000000000..cf6ffb1ba4a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt76x0_start(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x0_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x0_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt76x0_mac_stop(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt76x0_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int idx;
+
+ idx = ffs(~dev->vif_mask);
+ if (!idx || idx > 8)
+ return -ENOSPC;
+
+ idx--;
+ dev->vif_mask |= BIT(idx);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = GROUP_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+
+ return 0;
+}
+
+static void mt76x0_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int wcid = mvif->group_wcid.idx;
+
+ dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
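+	/* For each FIF_* flag the macro records whether mac80211 asked for
+	 * those frames and sets the corresponding "drop" bits in rxfilter
+	 * only when it did not, i.e. the hardware drops whatever the stack
+	 * does not want to see.
+	 */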
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_con_cal_onoff(dev, info);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+		/* Note: this is a hack because beacon_int is not changed on
+		 * leave, nor is any more appropriate event generated; rt2x00
+		 * doesn't seem to be bothered by this either.
+		 */
+ if (is_zero_ether_addr(info->bssid))
+ mt76x0_mac_config_tsf(dev, false, 0);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+ mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+ mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+ mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt76x0_mac_config_tsf(dev, true, info->beacon_int);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt76x0_mac_set_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt76x0_phy_recalibrate_after_assoc(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ mt76x0_mac_set_ampdu_factor(dev);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
+ mt76x0_mac_set_ampdu_factor(dev);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt76x0_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x0_agc_save(dev);
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76x0_agc_restore(dev);
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static int
+mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+ struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct mt76x0_dev *dev = hw->priv;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+ return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+ WARN_ON(msta->wcid.idx > N_WCIDS);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ msta->agg_ssn[tid] = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates;
+ struct ieee80211_tx_rate rate = {};
+
+ rcu_read_lock();
+ rates = rcu_dereference(sta->rates);
+
+ if (!rates)
+ goto out;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+ rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt76x0_ops = {
+ .tx = mt76x0_tx,
+ .start = mt76x0_start,
+ .stop = mt76x0_stop,
+ .add_interface = mt76x0_add_interface,
+ .remove_interface = mt76x0_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76_configure_filter,
+ .bss_info_changed = mt76x0_bss_info_changed,
+ .sta_add = mt76x0_sta_add,
+ .sta_remove = mt76x0_sta_remove,
+ .sta_notify = mt76x0_sta_notify,
+ .set_key = mt76x0_set_key,
+ .conf_tx = mt76x0_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76_ampdu_action,
+ .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+ .set_rts_threshold = mt76x0_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
new file mode 100644
index 000000000000..8affacbab90a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
@@ -0,0 +1,656 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt76x0.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE 1024
+
+static inline int firmware_running(struct mt76x0_dev *dev)
+{
+ return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
+ u8 seq, enum mcu_cmd cmd)
+{
+ WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+ FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
+ FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
+}
+
+static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
+ struct sk_buff *skb, bool need_resp)
+{
+ u32 i, csum = 0;
+
+ for (i = 0; i < skb->len / 4; i++)
+ csum ^= get_unaligned_le32(skb->data + i * 4);
+
+ trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (skb) {
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+ }
+ return skb;
+}
+
+static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
+{
+ int i;
+ int n = dev->mcu.reg_pairs_len;
+ u8 *buf = dev->mcu.resp.buf;
+
+ buf += 4;
+ len -= 8;
+
+ if (dev->mcu.burst_read) {
+ u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;
+
+ WARN_ON_ONCE(len/4 != n);
+ for (i = 0; i < n; i++) {
+ u32 val = get_unaligned_le32(buf + 4*i);
+
+ dev->mcu.reg_pairs[i].reg = reg++;
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ } else {
+ WARN_ON_ONCE(len/8 != n);
+ for (i = 0; i < n; i++) {
+ u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base;
+ u32 val = get_unaligned_le32(buf + 8*i + 4);
+
+ WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
+ dev->mcu.reg_pairs[i].value = val;
+ }
+ }
+}
+
+static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
+{
+ struct urb *urb = dev->mcu.resp.urb;
+ u32 rxfce;
+ int urb_status, ret, try = 5;
+
+ while (try--) {
+ if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+ msecs_to_jiffies(300))) {
+ dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
+ continue;
+ }
+
+ /* Make copies of important data before reusing the urb */
+ rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+ urb_status = urb->status * mt76x0_urb_has_error(urb);
+
+ if (urb_status == 0 && dev->mcu.reg_pairs)
+ mt76x0_read_resp_regs(dev, urb->actual_length);
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb,
+ &dev->mcu.resp_cmpl);
+ if (ret)
+ return ret;
+
+ if (urb_status)
+ dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
+ urb_status);
+
+ if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ return 0;
+
+ dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+	unsigned int cmd_pipe = usb_sndbulkpipe(usb_dev,
+ dev->out_ep[MT_EP_OUT_INBAND_CMD]);
+ int sent, ret;
+ u8 seq = 0;
+
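+	/* Sequence number 0 marks commands that expect no response, so
+	 * skip it when a response will be awaited.
+	 */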
+ if (wait_resp)
+ while (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);
+
+ if (dev->mcu.resp_cmpl.done)
+ dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");
+
+ trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
+ trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);
+
+ ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
+ goto out;
+ }
+ if (sent != skb->len)
+ dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);
+
+ if (wait_resp)
+ ret = mt76x0_mcu_wait_resp(dev, seq);
+
+out:
+ return ret;
+}
+
+static int
+mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ int ret;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return 0;
+
+ mutex_lock(&dev->mcu.mutex);
+ ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
+ enum mcu_function func, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
+}
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(cal),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
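+	/* Only the final chunk (cnt == n) asks the MCU for a response;
+	 * earlier chunks are fire-and-forget and the remainder is sent by
+	 * the tail recursion below.
+	 */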
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = false;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n)
+{
+ const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_regs_per_cmd, n);
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+ for (i = 0; i < cnt; i++)
+ skb_put_le32(skb, data[i]);
+
+ ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt76x0_burst_write_regs(dev, offset + cnt * 4,
+ data + cnt, n - cnt);
+}
+
+#if 0
+static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, base + data[0].reg);
+ skb_put_le32(skb, n);
+
+ mutex_lock(&dev->mcu.mutex);
+
+ dev->mcu.reg_pairs = data;
+ dev->mcu.reg_pairs_len = n;
+ dev->mcu.reg_base = base;
+ dev->mcu.burst_read = true;
+
+ ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);
+
+ dev->mcu.reg_pairs = NULL;
+
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+#endif
+
+struct mt76_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76_fw {
+ struct mt76_fw_header hdr;
+ u8 ivb[MT_MCU_IVB_SIZE];
+ u8 ilm[];
+};
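+
+/* Note: hdr.ilm_len counts the IVB bytes as well, and the DLM image
+ * directly follows the ILM in the blob (see mt76x0_upload_firmware()).
+ */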
+
+static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
+ const struct mt76x0_dma_buf *dma_buf,
+ const void *data, u32 len, u32 dst_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
+ __le32 reg;
+ u32 val;
+ int ret;
+
+ reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_TXD_INFO_LEN, len));
+ memcpy(buf.buf, &reg, sizeof(reg));
+ memcpy(buf.buf + sizeof(reg), data, len);
+ memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ if (ret)
+ return ret;
+ len = roundup(len, 4);
+ ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+ if (ret)
+ return ret;
+
+ buf.len = MT_DMA_HDR_LEN + len + 4;
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+ &buf, GFP_KERNEL,
+ mt76x0_complete_urb, &cmpl);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+ dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
+ usb_kill_urb(buf.urb);
+ return -ETIMEDOUT;
+ }
+ if (mt76x0_urb_has_error(buf.urb)) {
+ dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
+ buf.urb->status);
+ return buf.urb->status;
+ }
+
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ msleep(5);
+
+ return 0;
+}
+
+static int
+mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
+ const void *data, int len, u32 dst_addr)
+{
+ int n, ret;
+
+ if (len == 0)
+ return 0;
+
+ n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+ ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
+ if (ret)
+ return ret;
+
+#if 0
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+ return -ETIMEDOUT;
+#endif
+
+ return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
+{
+ struct mt76x0_dma_buf dma_buf;
+ void *ivb;
+ u32 ilm_len, dlm_len;
+ int i, ret;
+
+ ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+ if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
+ ilm_len, sizeof(fw->ivb));
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+ if (ret)
+ goto error;
+
+ dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+ dlm_len, MT_MCU_DLM_OFFSET);
+ if (ret)
+ goto error;
+
+ ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ 0x12, 0, ivb, sizeof(fw->ivb));
+ if (ret < 0)
+ goto error;
+ ret = 0;
+
+ for (i = 100; i && !firmware_running(dev); i--)
+ msleep(10);
+ if (!i) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+error:
+ kfree(ivb);
+ mt76x0_usb_free_buf(dev, &dma_buf);
+
+ return ret;
+}
+
+static int mt76x0_load_firmware(struct mt76x0_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (firmware_running(dev))
+ return 0;
+
+ ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76_fw_header *) fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_dbg(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, 0x2c);
+
+ mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+ mt76x0_vendor_reset(dev);
+ msleep(5);
+/*
+ mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+*/
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
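+	/* Pulse the TX_WL_DROP bit, presumably to flush packets still
+	 * queued towards the WLAN port before the upload starts.
+	 */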
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+ val |= MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+ release_firmware(fw);
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->mt76.dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt76x0_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
+
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
+{
+ int ret;
+
+ ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ init_completion(&dev->mcu.resp_cmpl);
+ if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return -ENOMEM;
+ }
+
+ ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt76x0_complete_urb, &dev->mcu.resp_cmpl);
+ if (ret) {
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
+{
+ usb_kill_urb(dev->mcu.resp.urb);
+ mt76x0_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 000000000000..8c2f77f4c3f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_MCU_H
+#define __MT76X0U_MCU_H
+
+struct mt76x0_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+#define MT_MCU_MEMMAP_WLAN 0x00410000
+/* We use same space for BBP as for MAC regs
+ * #define MT_MCU_MEMMAP_BBP 0x40000000
+ */
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+#define INBAND_PACKET_MAX_LEN 192
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+ MCU_CAL_RX_GROUP_DELAY,
+ MCU_CAL_TX_GROUP_DELAY,
+};
+
+int mt76x0_mcu_init(struct mt76x0_dev *dev);
+int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
+void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
+
+int
+mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
+
+int
+mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 000000000000..fc9857f61771
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT76X0U_H
+#define MT76X0U_H
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT_CALIBRATE_INTERVAL (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
+
+#define MT_BBP_REG_VERSION 0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
+#define MT_RX_ORDER 3
+#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt76x0_dma_buf {
+ struct urb *urb;
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+};
+
+struct mt76x0_mcu {
+ struct mutex mutex;
+
+ u8 msg_seq;
+
+ struct mt76x0_dma_buf resp;
+ struct completion resp_cmpl;
+
+ struct mt76_reg_pair *reg_pairs;
+ unsigned int reg_pairs_len;
+ u32 reg_base;
+ bool burst_read;
+};
+
+struct mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES 16
+struct mt76x0_rx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_rx {
+ struct urb *urb;
+ struct page *p;
+ } e[N_RX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int pending;
+};
+
+#define N_TX_ENTRIES 64
+
+struct mt76x0_tx_queue {
+ struct mt76x0_dev *dev;
+
+ struct mt76x0_dma_buf_tx {
+ struct urb *urb;
+ struct sk_buff *skb;
+ } e[N_TX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int used;
+ unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ * 0: mcast wcid
+ * 1: bssid wcid
+ * 1...: STAs
+ * ...7e: group wcids
+ * 7f: reserved
+ */
+#define N_WCIDS 128
+#define GROUP_WCID(idx) (254 - (idx))
+
+struct mt76x0_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE 39
+#define MT_FREQ_OFFSET_INVALID -128
+
+/* addr req mask */
+#define MT_VEND_TYPE_EEPROM BIT(31)
+#define MT_VEND_TYPE_CFG BIT(30)
+#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
+
+#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
+
+enum mt_bw {
+ MT_BW_20,
+ MT_BW_40,
+};
+
+/**
+ * struct mt76x0_dev - adapter structure
+ * @lock: protects @wcid->tx_rate.
+ * @mac_lock: locks out mac80211's tx status and rx paths.
+ * @tx_lock: protects @tx_q and changes of MT76_STATE_*_STATS
+ * flags in @state.
+ * @rx_lock: protects @rx_q.
+ * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex: ensures exclusive access from mac80211 callbacks.
+ * @reg_atomic_mutex: ensures atomicity of indirect register accesses
+ * (accesses to RF and BBP).
+ * @hw_atomic_mutex: ensures exclusive access to HW during critical
+ * operations (power management, channel switch).
+ */
+struct mt76x0_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mutex mutex;
+
+ struct mutex usb_ctrl_mtx;
+ u8 data[32];
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ u8 out_ep[__MT_EP_OUT_MAX];
+ u16 out_max_packet;
+ u8 in_ep[__MT_EP_IN_MAX];
+ u16 in_max_packet;
+
+ unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
+ unsigned long vif_mask;
+
+ struct mt76x0_mcu mcu;
+
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct workqueue_struct *stat_wq;
+ struct delayed_work stat_work;
+
+ struct mt76_wcid *mon_wcid;
+ struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+ spinlock_t mac_lock;
+
+ const u16 *beacon_offsets;
+
+ u8 macaddr[ETH_ALEN];
+ struct mt76x0_eeprom_params *ee;
+
+ struct mutex reg_atomic_mutex;
+ struct mutex hw_atomic_mutex;
+
+ u32 rxfilter;
+ u32 debugfs_reg;
+
+ /* TX */
+ spinlock_t tx_lock;
+ struct mt76x0_tx_queue *tx_q;
+ struct sk_buff_head tx_skb_done;
+
+ atomic_t avg_ampdu_len;
+
+ /* RX */
+ spinlock_t rx_lock;
+ struct mt76x0_rx_queue rx_q;
+
+ /* Connection monitoring things */
+ spinlock_t con_mon_lock;
+ u8 ap_bssid[ETH_ALEN];
+
+ s8 bcn_freq_off;
+ u8 bcn_phy_mode;
+
+ int avg_rssi; /* starts at 0 and converges */
+
+ u8 agc_save;
+ u16 chainmask;
+
+ struct mac_stats stats;
+};
+
+struct mt76x0_wcid {
+ u8 idx;
+ u8 hw_key_idx;
+
+ u16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 is_probe:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+struct mt76_sta {
+ struct mt76_wcid wcid;
+ struct mt76_tx_status status;
+ int n_frames;
+ u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+struct mt76x0_rxwi;
+
+extern const struct ieee80211_ops mt76x0_ops;
+
+static inline bool is_mt7610e(struct mt76x0_dev *dev)
+{
+ /* TODO */
+ return false;
+}
+
+void mt76x0_init_debugfs(struct mt76x0_dev *dev);
+
+int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len);
+int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len);
+int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
+ const u32 *data, int n);
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
+int mt76x0_init_hardware(struct mt76x0_dev *dev);
+int mt76x0_register_device(struct mt76x0_dev *dev);
+void mt76x0_cleanup(struct mt76x0_dev *dev);
+void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x0_dev *dev);
+void mt76x0_mac_stop(struct mt76x0_dev *dev);
+
+/* PHY */
+void mt76x0_phy_init(struct mt76x0_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
+void mt76x0_agc_save(struct mt76x0_dev *dev);
+void mt76x0_agc_restore(struct mt76x0_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt76x0_mac_work(struct work_struct *work);
+void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
+void
+mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
+
+/* TX */
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
+void mt76x0_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76x0_remove_hdr_pad(struct sk_buff *skb);
+int mt76x0_insert_hdr_pad(struct sk_buff *skb);
+
+int mt76x0_dma_init(struct mt76x0_dev *dev);
+void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
+
+int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 000000000000..5da7bfbe907f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1008 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "phy.h"
+#include "initvals.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static int
+mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+{
+ int ret = 0;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+ trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+ u8 bank, reg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -ENODEV;
+
+ bank = MT_RF_BANK(offset);
+ reg = MT_RF_REG(offset);
+
+	if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
+ return -EINVAL;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt76_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt76_rr(dev, MT_RF_CSR_CFG);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
+ trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
+ bank, reg, ret);
+
+ return ret;
+}
+
+static int
+rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ .value = val,
+ };
+
+ return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ } else {
+ WARN_ON_ONCE(1);
+ return mt76x0_rf_csr_wr(dev, offset, val);
+ }
+}
+
+static int
+rf_rr(struct mt76x0_dev *dev, u32 offset)
+{
+ int ret;
+ u32 val;
+
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ struct mt76_reg_pair pair = {
+ .reg = offset,
+ };
+
+ ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ val = pair.value;
+ } else {
+ WARN_ON_ONCE(1);
+ ret = val = mt76x0_rf_csr_rr(dev, offset);
+ }
+
+ return (ret < 0) ? ret : val;
+}
+
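+/* Read-modify-write helper for RF registers: clears the bits in @mask,
+ * ORs in @val, and returns the value written (or a negative errno).
+ */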
+static int
+rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = rf_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ ret = rf_wr(dev, offset, val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int
+rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+{
+ return rf_rmw(dev, offset, 0, val);
+}
+
+#if 0
+static int
+rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+{
+ return rf_rmw(dev, offset, mask, 0);
+}
+#endif
+
+#define RF_RANDOM_WRITE(dev, tab) \
+	mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab))
+
+int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+{
+ int i = 20;
+ u32 val;
+
+ do {
+ val = mt76_rr(dev, MT_BBP(CORE, 0));
+		dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
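+		/* Ready once the version reads neither all-zeros nor
+		 * all-ones.
+		 */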
+ if (val && ~val)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void
+mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
+ u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
+{
+ s8 lna_gain, rssi_offset;
+ int val;
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
+ lna_gain = dev->ee->lna_gain_2ghz;
+ rssi_offset = dev->ee->rssi_offset_2ghz[0];
+ } else {
+ lna_gain = dev->ee->lna_gain_5ghz[0];
+ rssi_offset = dev->ee->rssi_offset_5ghz[0];
+ }
+
+ val = rxwi->rssi[0] + rssi_offset - lna_gain;
+
+ return val;
+}
+
+static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+{
+ u8 val;
+
+ val = rf_rr(dev, MT_RF(0, 4));
+ if ((val & 0x70) != 0x30)
+ return;
+
+ /*
+ * Calibration Mode - Open loop, closed loop, and amplitude:
+ * B0.R06.[0]: 1
+ * B0.R06.[3:1] bp_close_code: 100
+ * B0.R05.[7:0] bp_open_code: 0x0
+ * B0.R04.[2:0] cal_bits: 000
+ * B0.R03.[2:0] startup_time: 011
+ * B0.R03.[6:4] settle_time:
+ * 80MHz channel: 110
+ * 40MHz channel: 101
+ * 20MHz channel: 100
+ */
+ val = rf_rr(dev, MT_RF(0, 6));
+ val &= ~0xf;
+ val |= 0x09;
+ rf_wr(dev, MT_RF(0, 6), val);
+
+ val = rf_rr(dev, MT_RF(0, 5));
+ if (val != 0)
+ rf_wr(dev, MT_RF(0, 5), 0x0);
+
+ val = rf_rr(dev, MT_RF(0, 4));
+ val &= ~0x07;
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ val = rf_rr(dev, MT_RF(0, 3));
+ val &= ~0x77;
+ if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
+ val |= 0x63;
+ } else if (channel == 3 || channel == 4 || channel == 10) {
+ val |= 0x53;
+ } else if (channel == 2 || channel == 5 || channel == 6 ||
+ channel == 8 || channel == 11 || channel == 12) {
+ val |= 0x43;
+ } else {
+ WARN(1, "Unknown channel %u\n", channel);
+ return;
+ }
+ rf_wr(dev, MT_RF(0, 3), val);
+
+ /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
+ val = rf_rr(dev, MT_RF(0, 4));
+ val = ((val & ~(0x80)) | 0x80);
+ rf_wr(dev, MT_RF(0, 4), val);
+
+ msleep(2);
+}
+
+static void
+mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
+{
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+
+static void
+mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x45);
+ rf_wr(dev, MT_RF(6, 0), 0x44);
+
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
+ break;
+ case NL80211_BAND_5GHZ:
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+
+ rf_wr(dev, MT_RF(5, 0), 0x44);
+ rf_wr(dev, MT_RF(6, 0), 0x45);
+
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+
+ mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
+ break;
+ default:
+ break;
+ }
+}
+
+#define EXT_PA_2G_5G 0x0
+#define EXT_PA_5G_ONLY 0x1
+#define EXT_PA_2G_ONLY 0x2
+#define INT_PA_2G_5G 0x3
+
+static void
+mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ u16 rf_band = rf_bw_band & 0xff00;
+ u16 rf_bw = rf_bw_band & 0x00ff;
+ u32 mac_reg;
+ u8 rf_val;
+ int i;
+ bool bSDM = false;
+ const struct mt76x0_freq_item *freq_item;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
+ if (channel == mt76x0_sdm_channel[i]) {
+ bSDM = true;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
+ if (channel == mt76x0_frequency_plan[i].channel) {
+ rf_band = mt76x0_frequency_plan[i].band;
+
+ if (bSDM)
+ freq_item = &(mt76x0_sdm_frequency_plan[i]);
+ else
+ freq_item = &(mt76x0_frequency_plan[i]);
+
+ rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+ rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+ rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+ rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+ rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR32_b7b5;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+ /* R32<4:0> pll_den: (Denomina - 8) */
+ rf_val = rf_rr(dev, MT_RF(0, 32));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR32_b4b0;
+ rf_wr(dev, MT_RF(0, 32), rf_val);
+
+ /* R31<7:5> */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0xE0;
+ rf_val |= freq_item->pllR31_b7b5;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+ /* R31<4:0> pll_k(Nominator) */
+ rf_val = rf_rr(dev, MT_RF(0, 31));
+ rf_val &= ~0x1F;
+ rf_val |= freq_item->pllR31_b4b0;
+ rf_wr(dev, MT_RF(0, 31), rf_val);
+
+ /* R30<7> sdm_reset_n */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x80;
+ if (bSDM) {
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ rf_val |= 0x80;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ } else {
+ rf_val |= freq_item->pllR30_b7;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+ }
+
+ /* R30<6:2> sdmmash_prbs,sin */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x7C;
+ rf_val |= freq_item->pllR30_b6b2;
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<1> sdm_bp */
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x02;
+ rf_val |= (freq_item->pllR30_b1 << 1);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R30<0> R29<7:0> (hex) pll_n */
+ rf_val = freq_item->pll_n & 0x00FF;
+ rf_wr(dev, MT_RF(0, 29), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 30));
+ rf_val &= ~0x1;
+ rf_val |= ((freq_item->pll_n >> 8) & 0x0001);
+ rf_wr(dev, MT_RF(0, 30), rf_val);
+
+ /* R28<7:6> isi_iso */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0xC0;
+ rf_val |= freq_item->pllR28_b7b6;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<5:4> pfd_dly */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x30;
+ rf_val |= freq_item->pllR28_b5b4;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<3:2> clksel option */
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x0C;
+ rf_val |= freq_item->pllR28_b3b2;
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
+ rf_val = freq_item->pll_sdm_k & 0x000000FF;
+ rf_wr(dev, MT_RF(0, 26), rf_val);
+
+ rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF);
+ rf_wr(dev, MT_RF(0, 27), rf_val);
+
+ rf_val = rf_rr(dev, MT_RF(0, 28));
+ rf_val &= ~0x3;
+ rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003);
+ rf_wr(dev, MT_RF(0, 28), rf_val);
+
+ /* R24<1:0> xo_div */
+ rf_val = rf_rr(dev, MT_RF(0, 24));
+ rf_val &= ~0x3;
+ rf_val |= freq_item->pllR24_b1b0;
+ rf_wr(dev, MT_RF(0, 24), rf_val);
+
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ } else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
+ (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
+ rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
+ rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg &= ~0xC; /* Clear 0x518[3:2] */
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+
+ if (dev->ee->pa_type == INT_PA_2G_5G ||
+ (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
+ (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
+ ; /* Internal PA - nothing to do. */
+ } else {
+		/* MT_RF_MISC (offset: 0x0518)
+		 * [2] 1'b1: enable external A band PA, 1'b0: disable it
+		 * [3] 1'b1: enable external G band PA, 1'b0: disable it
+		 */
+ if (rf_band & RF_A_BAND) {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x4;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ } else {
+ mac_reg = mt76_rr(dev, MT_RF_MISC);
+ mac_reg |= 0x8;
+ mt76_wr(dev, MT_RF_MISC, mac_reg);
+ }
+
+ /* External PA */
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
+ if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
+ rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+ mt76x0_rf_ext_pa_tab[i].value);
+ }
+
+ if (rf_band & RF_G_BAND) {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
+ /* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x896400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
+ /* Set Atten mode = 0 For Ext A band, Disable Tx Inc dcoc Cal. */
+ mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
+ mac_reg &= 0x890400FF;
+ mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
+ }
+}
+
+static void
+mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
+ const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
+ const struct mt76_reg_pair *pair = &item->reg_pair;
+
+ if ((rf_bw_band & item->bw_band) != rf_bw_band)
+ continue;
+
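+		/* BBP AGC register 8 holds the initial RX gain; compensate
+		 * it by twice the per-band LNA gain read from EEPROM.
+		 */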
+ if (pair->reg == MT_BBP(AGC, 8)) {
+ u32 val = pair->value;
+ u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+
+ if (channel > 14) {
+ if (channel < 100)
+					gain -= dev->ee->lna_gain_5ghz[0] * 2;
+				else if (channel < 137)
+					gain -= dev->ee->lna_gain_5ghz[1] * 2;
+				else
+					gain -= dev->ee->lna_gain_5ghz[2] * 2;
+			} else {
+				gain -= dev->ee->lna_gain_2ghz * 2;
+ }
+
+ val &= ~MT_BBP_AGC_GAIN;
+ val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
+ mt76_wr(dev, pair->reg, val);
+ } else {
+ mt76_wr(dev, pair->reg, pair->value);
+ }
+ }
+}
+
+#if 0
+static void
+mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
+ val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_7, val);
+
+ /* TODO: fix VHT */
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_8, val);
+
+ val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+ mt76_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
+{
+ u32 val;
+ int i;
+ int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
+
+ for (i = 0; i < 4; i++) {
+ if (channel <= 14)
+ val = dev->ee->tx_pwr_cfg_2g[i][bw];
+ else
+ val = dev->ee->tx_pwr_cfg_5g[i][bw];
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
+ }
+
+ mt76x0_extra_power_over_mac(dev);
+}
+#endif
+
+static void
+mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+{
+ enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4};
+ int bw;
+
+ switch (width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bw = BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ bw = BW_10;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_5:
+ /* TODO error */
+		return;
+ }
+
+ mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void
+mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
+{
+ static const int mt76x0_tx_pwr_ch_list[] = {
+		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+		36, 38, 40, 44, 46, 48, 52, 54, 56, 60, 62, 64,
+		100, 102, 104, 108, 110, 112, 116, 118, 120, 124, 126,
+		128, 132, 134, 136, 140,
+		149, 151, 153, 157, 159, 161, 165, 167, 169, 171, 173,
+		42, 58, 106, 122, 155
+ };
+ int i;
+ u32 val;
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
+ if (mt76x0_tx_pwr_ch_list[i] == channel)
+ break;
+
+ if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
+ return;
+
+ val = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ val &= ~0x3f3f;
+ val |= dev->ee->tx_pwr_per_chan[i];
+ val |= 0x2f2f << 16;
+ mt76_wr(dev, MT_TX_ALC_CFG_0, val);
+}
+
+static int
+__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
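+	/* EXT_CCA routing per 20 MHz sub-channel group: each entry remaps
+	 * the four CCA engines and selects one via the mask (values
+	 * apparently taken from the vendor driver).
+	 */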
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ int ch_group_index, freq, freq1;
+ u8 channel;
+ u32 val;
+ u16 rf_bw_band;
+
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chandef->chan->hw_value;
+ rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ ch_group_index = 0;
+ else
+ ch_group_index = 1;
+ channel += 2 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ rf_bw_band |= RF_BW_80;
+ break;
+ default:
+ ch_group_index = 0;
+ rf_bw_band |= RF_BW_20;
+ break;
+ }
+
+ mt76x0_bbp_set_bw(dev, chandef->width);
+ mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
+ mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ mt76x0_phy_set_band(dev, chandef->chan->band);
+ mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
+
+ /* set Japan Tx filter at channel 14 */
+ val = mt76_rr(dev, MT_BBP(CORE, 1));
+ if (channel == 14)
+ val |= 0x20;
+ else
+ val &= ~0x20;
+ mt76_wr(dev, MT_BBP(CORE, 1), val);
+
+ mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
+
+ /* Vendor driver don't do it */
+ /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+
+ if (scan)
+ mt76x0_vco_cal(dev, channel);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+ mt76x0_phy_set_chan_pwr(dev, channel);
+
+ dev->mt76.chandef = *chandef;
+ return 0;
+}
+
+int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+ ret = __mt76x0_phy_set_channel(dev, chandef);
+ mutex_unlock(&dev->hw_atomic_mutex);
+
+ return ret;
+}
+
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+{
+ u32 tx_alc, reg_val;
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);
+
+ mt76x0_vco_cal(dev, channel);
+
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, 0x2124);
+ reg_val &= 0xffffff7e;
+ mt76_wr(dev, 0x2124, reg_val);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
+
+ mt76_wr(dev, 0x2124, reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ msleep(100);
+
+ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+}
+
+void mt76x0_agc_save(struct mt76x0_dev *dev)
+{
+ /* Only one RX path */
+ dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
+}
+
+void mt76x0_agc_restore(struct mt76x0_dev *dev)
+{
+ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
+}
+
+static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+{
+ u8 rf_b7_73, rf_b0_66, rf_b0_67;
+ int cycle, temp;
+ u32 val;
+ s32 sval;
+
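+	/* Save the registers touched by the measurement; note that
+	 * rf_b0_67 is read from (and restored to) B0.R73, the name is
+	 * apparently inherited from the vendor code.
+	 */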
+ rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
+ rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
+ rf_b0_67 = rf_rr(dev, MT_RF(0, 73));
+
+ rf_wr(dev, MT_RF(7, 73), 0x02);
+ rf_wr(dev, MT_RF(0, 66), 0x23);
+ rf_wr(dev, MT_RF(0, 73), 0x01);
+
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
+
+ for (cycle = 0; cycle < 2000; cycle++) {
+ val = mt76_rr(dev, MT_BBP(CORE, 34));
+ if (!(val & 0x10))
+ break;
+ udelay(3);
+ }
+
+ if (cycle >= 2000) {
+ val &= 0x10;
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+ goto done;
+ }
+
+ sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+ if (!(sval & 0x80))
+ sval &= 0x7f; /* Positive */
+ else
+ sval |= 0xffffff00; /* Negative */
+
+	temp = (35 * (sval - dev->ee->temp_off)) / 10 + 25;
+
+done:
+ rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+ rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+ rf_wr(dev, MT_RF(0, 73), rf_b0_67);
+}
+
+static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+{
+ u32 val, init_vga;
+
+ init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
+ if (dev->avg_rssi > -60)
+ init_vga -= 0x20;
+ else if (dev->avg_rssi > -70)
+ init_vga -= 0x10;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 8));
+ val &= 0xFFFF80FF;
+ val |= init_vga << 8;
+	mt76_wr(dev, MT_BBP(AGC, 8), val);
+}
+
+static void mt76x0_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ cal_work.work);
+
+ mt76x0_dynamic_vga_tuning(dev);
+ mt76x0_temp_sensor(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
+ struct ieee80211_bss_conf *info)
+{
+ /* Start/stop collecting beacon data */
+ spin_lock_bh(&dev->con_mon_lock);
+ ether_addr_copy(dev->ap_bssid, info->bssid);
+ dev->avg_rssi = 0;
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+}
+
+static void
+mt76x0_set_rx_chains(struct mt76x0_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~(BIT(3) | BIT(4));
+
+ if (dev->chainmask & BIT(1))
+ val |= BIT(3);
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+
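+	/* Barrier plus dummy read-back to make sure the write has taken
+	 * effect; the value itself is discarded.
+	 */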
+ mb();
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+
+static void
+mt76x0_set_tx_dac(struct mt76x0_dev *dev)
+{
+ if (dev->chainmask & BIT(1))
+ mt76_set(dev, MT_BBP(TXBE, 5), 3);
+ else
+ mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x0_rf_init(struct mt76x0_dev *dev)
+{
+ int i;
+ u8 val;
+
+ RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
+ RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
+ const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
+
+ if (item->bw_band == RF_BW_20)
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
+ rf_wr(dev, item->rf_bank_reg, item->value);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
+ if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
+ rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
+ }
+ }
+
+	/* Frequency calibration:
+	 * E1: B0.R22<6:0>: xo_cxo<6:0>
+	 * E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
+	 */
+ rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
+ val = rf_rr(dev, MT_RF(0, 22));
+
+	/* Reset the DAC during power up: set B0.R73<7>=1, clear it, then
+	 * set it again.
+	 */
+ val = rf_rr(dev, MT_RF(0, 73));
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val &= ~0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+ val |= 0x80;
+ rf_wr(dev, MT_RF(0, 73), val);
+
+	/* vcocal_en: initiate VCO calibration (self-clearing on completion).
+	 * Must be done at the end of the RF configuration.
+	 */
+ rf_set(dev, MT_RF(0, 4), 0x80);
+}
+
+static void mt76x0_ant_select(struct mt76x0_dev *dev)
+{
+ /* Single antenna mode. */
+ mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+ mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+ mt76_clear(dev, MT_COEXCFG0, BIT(2));
+ mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
+}
+
+void mt76x0_phy_init(struct mt76x0_dev *dev)
+{
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
+
+ mt76x0_ant_select(dev);
+
+ mt76x0_rf_init(dev);
+
+ mt76x0_set_rx_chains(dev);
+ mt76x0_set_tx_dac(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 000000000000..2880a43c3cb0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,81 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MT76X0_PHY_H_
+#define _MT76X0_PHY_H_
+
+#define RF_G_BAND 0x0100
+#define RF_A_BAND 0x0200
+#define RF_A_BAND_LB 0x0400
+#define RF_A_BAND_MB 0x0800
+#define RF_A_BAND_HB 0x1000
+#define RF_A_BAND_11J 0x2000
+
+#define RF_BW_20 1
+#define RF_BW_40 2
+#define RF_BW_10 4
+#define RF_BW_80 8
+
+#define MT_RF(bank, reg) ((bank) << 16 | (reg))
+#define MT_RF_BANK(offset) ((offset) >> 16)
+#define MT_RF_REG(offset) ((offset) & 0xff)
+
+struct mt76x0_bbp_switch_item {
+ u16 bw_band;
+ struct mt76_reg_pair reg_pair;
+};
+
+struct mt76x0_rf_switch_item {
+ u32 rf_bank_reg;
+ u16 bw_band;
+ u8 value;
+};
+
+struct mt76x0_freq_item {
+ u8 channel;
+ u32 band;
+ u8 pllR37;
+ u8 pllR36;
+ u8 pllR35;
+ u8 pllR34;
+ u8 pllR33;
+ u8 pllR32_b7b5;
+ u8 pllR32_b4b0; /* PLL_DEN (Denomina - 8) */
+ u8 pllR31_b7b5;
+ u8 pllR31_b4b0; /* PLL_K (Nominator *)*/
+ u8 pllR30_b7; /* sdm_reset_n */
+ u8 pllR30_b6b2; /* sdmmash_prbs,sin */
+ u8 pllR30_b1; /* sdm_bp */
+ u16 pll_n; /* R30<0>, R29<7:0> (hex) */
+ u8 pllR28_b7b6; /* isi,iso */
+ u8 pllR28_b5b4; /* pfd_dly */
+ u8 pllR28_b3b2; /* clksel option */
+ u32 pll_sdm_k; /* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
+ u8 pllR24_b1b0; /* xo_div */
+};
+
+struct mt76x0_rate_pwr_item {
+ s8 mcs_power;
+ u8 rf_pa_mode;
+};
+
+struct mt76x0_rate_pwr_tab {
+ struct mt76x0_rate_pwr_item cck[4];
+ struct mt76x0_rate_pwr_item ofdm[8];
+ struct mt76x0_rate_pwr_item ht[8];
+ struct mt76x0_rate_pwr_item vht[10];
+ struct mt76x0_rate_pwr_item stbc[8];
+ struct mt76x0_rate_pwr_item mcs32;
+};
+
+#endif /* _MT76X0_PHY_H_ */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
new file mode 100644
index 000000000000..16bed4aaa242
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_COEXCFG3 0x004c
+
+#define MT_LDO_CTRL_0 0x006c
+#define MT_LDO_CTRL_1 0x0070
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_IOCFG_6 0x0124
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_WMM_CTRL 0x0230 /* MT76x0 */
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_USB_DMA_CFG 0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+#if 0
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#endif
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+#define MT_RING_SIZE 0x10
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_MISC 0x0518
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_COM_REG0 0x0730
+#define MT_COM_REG1 0x0734
+#define MT_COM_REG2 0x0738
+#define MT_COM_REG3 0x073C
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_MAC_CSR0 0x1000
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_LED_CFG 0x102c
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_HT_FBK_CFG0 0x1354
+#define MT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+#define MT_HT_CTRL_CFG 0x1410
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+
+#define MT_RX_STA_CNT0 0x1700
+#define MT_RX_STA_CNT1 0x1704
+#define MT_RX_STA_CNT2 0x1708
+#define MT_TX_STA_CNT0 0x170c
+#define MT_TX_STA_CNT1 0x1710
+#define MT_TX_STA_CNT2 0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
+ * MT_TX_STAT_FIFO_ETXBF BIT(27)
+ * MT_TX_STAT_FIFO_SND BIT(28)
+ * MT_TX_STAT_FIFO_ITXBF BIT(29)
+ * However, tests show that bits 16-31 have the same layout as the TXWI
+ * rate_ctl field, with the rate set to the rate at which the frame was acked.
+ */
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+
+#define MT_MPDU_DENSITY_CNT 0x1740
+
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) \
+ (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx) \
+ (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx) \
+ ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) \
+ (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) \
+ (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) \
+ ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
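
For readers new to this register-definition style: BIT(n) marks a single-bit flag and GENMASK(h, l) a contiguous field spanning bits l..h. Below is a minimal standalone sketch (userspace C, with the kernel helpers re-defined locally; the sample FIFO word is invented for illustration) that decodes the MT_TX_STAT_FIFO layout defined above:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

#define MT_TX_STAT_FIFO_VALID   BIT(0)
#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
#define MT_TX_STAT_FIFO_WCID    GENMASK(15, 8)
#define MT_TX_STAT_FIFO_RATE    GENMASK(31, 16)

/* Extract a field the way the kernel's FIELD_GET() would. */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t stat = 0x40210121;	/* invented sample FIFO word */

	printf("valid:%u success:%u wcid:%u rate:0x%04x\n",
	       !!(stat & MT_TX_STAT_FIFO_VALID),
	       !!(stat & MT_TX_STAT_FIFO_SUCCESS),
	       field_get(MT_TX_STAT_FIFO_WCID, stat),
	       field_get(MT_TX_STAT_FIFO_RATE, stat));
	return 0;
}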
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
new file mode 100644
index 000000000000..8abdd3cd546d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
new file mode 100644
index 000000000000..8a752a09f2dc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76X0U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x0.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x0
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "%04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt76x0_submit_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \
+ struct urb u; \
+ u.pipe = __pipe; \
+ u.transfer_buffer_length = __len; \
+ trace_mt76x0_submit_urb(__dev, &u); \
+})
+
+TRACE_EVENT(mt76x0_mcu_msg_send,
+ TP_PROTO(struct mt76_dev *dev,
+ struct sk_buff *skb, u32 csum, bool resp),
+ TP_ARGS(dev, skb, csum, resp),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, info)
+ __field(u32, csum)
+ __field(bool, resp)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->info = *(u32 *)skb->data;
+ __entry->csum = csum;
+ __entry->resp = resp;
+ ),
+ TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+ DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt76x0_vend_req,
+ TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t buflen, int ret),
+ TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+ __field(u16, val) __field(u16, offset) __field(void*, buf)
+ __field(int, buflen) __field(int, ret)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = pipe;
+ __entry->req = req;
+ __entry->req_type = req_type;
+ __entry->val = val;
+ __entry->offset = offset;
+ __entry->buf = buf;
+ __entry->buflen = buflen;
+ __entry->ret = ret;
+ ),
+ TP_printk(DEV_PR_FMT
+ "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+ DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+ __entry->req_type, __entry->val, __entry->offset,
+ !!__entry->buf, __entry->buflen)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, bank)
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ __entry->bank = bank;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
+ TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+ )
+);
+
+TRACE_EVENT(mt76x0_rx,
+ TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
+ TP_ARGS(dev, rxwi, f),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76x0_rxwi, rxwi)
+ __field(u32, fce_info)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->rxwi = *rxwi;
+ __entry->fce_info = f;
+ ),
+ TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
+ le32_to_cpu(__entry->rxwi.rxinfo),
+ le32_to_cpu(__entry->rxwi.ctl))
+);
+
+TRACE_EVENT(mt76x0_tx,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
+ struct mt76_sta *sta, struct mt76_txwi *h),
+ TP_ARGS(dev, skb, sta, h),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76_txwi, h)
+ __field(struct sk_buff *, skb)
+ __field(struct mt76_sta *, sta)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->h = *h;
+ __entry->skb = skb;
+ __entry->sta = sta;
+ ),
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+ __entry->skb, __entry->sta,
+ le16_to_cpu(__entry->h.flags),
+ le16_to_cpu(__entry->h.rate_ctl),
+ __entry->h.ack_ctl, __entry->h.wcid,
+ le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt76x0_tx_dma_done,
+ TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
+ TP_ARGS(dev, skb),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(struct sk_buff *, skb)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->skb = skb;
+ ),
+ TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt76x0_tx_status_cleaned,
+ TP_PROTO(struct mt76_dev *dev, int cleaned),
+ TP_ARGS(dev, cleaned),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, cleaned)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cleaned = cleaned;
+ ),
+ TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt76x0_tx_status,
+ TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
+ TP_ARGS(dev, stat1, stat2),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, stat1) __field(u32, stat2)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->stat1 = stat1;
+ __entry->stat2 = stat2;
+ ),
+ TP_printk(DEV_PR_FMT "%08x %08x",
+ DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt76x0_rx_dma_aggr,
+ TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
+ TP_ARGS(dev, cnt, paged),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, cnt)
+ __field(bool, paged)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cnt = cnt;
+ __entry->paged = paged;
+ ),
+ TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+ DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
+ TP_PROTO(struct mt76_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(mt76x0_set_shared_key,
+ TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
+ TP_ARGS(dev, vid, key),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, vid)
+ __field(u8, key)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->vid = vid;
+ __entry->key = key;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
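
The !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) guard above lets this header be expanded more than once: once for the tracepoint declarations, and again, under CREATE_TRACE_POINTS in trace.c, for their definitions. A minimal userspace sketch of the same write-the-list-once, expand-it-twice idea (all names here are illustrative, not kernel API):

#include <stdio.h>

/* The event list is written once; each expansion pass gives the same
 * names a different definition - the same trick that lets trace.h be
 * parsed twice via TRACE_HEADER_MULTI_READ.
 */
#define EVENT_LIST \
	EVENT(reg_read) \
	EVENT(reg_write) \
	EVENT(submit_urb)

/* Pass 1: declare a counter per event. */
#define EVENT(name) static int trace_count_##name;
EVENT_LIST
#undef EVENT

/* Pass 2: define a trace function per event. */
#define EVENT(name) \
	static void trace_##name(void) \
	{ trace_count_##name++; printf(#name ": %d\n", trace_count_##name); }
EVENT_LIST
#undef EVENT

int main(void)
{
	trace_reg_read();
	trace_reg_write();
	trace_reg_read();
	trace_submit_urb();
	return 0;
}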
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
new file mode 100644
index 000000000000..751b49c28ae5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+#include "trace.h"
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ return q2hwq(qid);
+}
+
+static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+ struct ieee80211_tx_info *info)
+{
+ int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+ skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+ if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+ mt76x0_remove_hdr_pad(skb);
+
+ skb_trim(skb, pkt_len);
+}
+
+void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ mt76x0_tx_skb_remove_dma_overhead(skb, info);
+
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock(&dev->mac_lock);
+ ieee80211_tx_status(dev->mt76.hw, skb);
+ spin_unlock(&dev->mac_lock);
+}
+
+static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76_txwi) + 4;
+ if (hdr_len % 4)
+ need_head += 2;
+
+ return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+ int pkt_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct mt76_txwi *txwi;
+ unsigned long flags;
+ u16 txwi_flags = 0;
+ u32 pkt_id;
+ u16 rate_ctl;
+ u8 nss;
+
+ txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ spin_lock_irqsave(&dev->mt76.lock, flags);
+ if (rate->idx < 0 || !rate->count) {
+ rate_ctl = wcid->tx_rate;
+ nss = wcid->tx_rate_nss;
+ } else {
+ rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
+ }
+ spin_unlock_irqrestore(&dev->mt76.lock, flags);
+
+ txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ pkt_id = 1;
+ } else {
+ pkt_id = 0;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ pkt_id |= MT_TXWI_PKTID_PROBE;
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 7, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ ba_size = 0;
+ } else {
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU;
+ txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+ }
+
+ txwi->wcid = wcid->idx;
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(pkt_len);
+ txwi->pktid = pkt_id;
+
+ return txwi;
+}
+
+void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x0_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct mt76_sta *msta = NULL;
+ struct mt76_wcid *wcid = dev->mon_wcid;
+ struct mt76_txwi *txwi;
+ int pkt_len = skb->len;
+ int hw_q = skb2q(skb);
+
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+ if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
+ ieee80211_free_txskb(dev->mt76.hw, skb);
+ return;
+ }
+
+ if (sta) {
+ msta = (struct mt76_sta *) sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ wcid = &mvif->group_wcid;
+ }
+
+ txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+ if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
+ return;
+
+ trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
+}
+
+void mt76x0_tx_stat(struct work_struct *work)
+{
+ struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ stat_work.work);
+ struct mt76_tx_status stat;
+ unsigned long flags;
+ int cleaned = 0;
+ u8 update = 1;
+
+ while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
+ stat = mt76x0_mac_fetch_tx_status(dev);
+ if (!stat.valid)
+ break;
+
+ mt76x0_send_tx_status(dev, &stat, &update);
+
+ cleaned++;
+ }
+ trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (cleaned)
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+ else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(20));
+ else
+ clear_bit(MT76_READING_STATS, &dev->mt76.state);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x0_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+ u32 val;
+
+ /* TODO: should we do funny things with the parameters?
+ * See what mt76x0_set_default_edca() used to do in init.c.
+ */
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ WARN_ON(params->txop > 0xff);
+ WARN_ON(params->aifs > 0xf);
+ WARN_ON(cw_min > 0xf);
+ WARN_ON(cw_max > 0xf);
+
+ val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ /* TODO: based on the user-controlled EnableTxBurst variable, the vendor
+ * driver sets a very long TXOP on AC0 (see connect.c:2009), but seemingly
+ * only while connected; when not connected it should be 0.
+ */
+ if (!hw_q)
+ val |= 0x60;
+ else
+ val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
+ mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
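
mt76x0_conf_tx() above stores CWmin/CWmax in exponent form: mac80211 passes them as (2^n - 1) values and fls() recovers n for the EDCA register fields. A small standalone sketch of that conversion (fls() re-implemented here with a GCC builtin purely for illustration):

#include <stdio.h>

/* fls() stand-in: 1-based index of the highest set bit, 0 for 0,
 * matching the kernel helper used by mt76x0_conf_tx() above.
 */
static int fls_demo(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* mac80211 hands over CWmin/CWmax as (2^n - 1) values; the
	 * hardware EDCA registers want the exponent n instead.
	 */
	unsigned int cw[] = { 3, 7, 15, 1023 };

	for (unsigned int i = 0; i < sizeof(cw) / sizeof(cw[0]); i++)
		printf("cw=%4u -> exponent %d\n", cw[i], fls_demo(cw[i]));
	return 0;
}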
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 000000000000..54ae1f113be2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt76x0.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt76x0_device_table[] = {
+ { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */
+ { USB_DEVICE(0x13B1, 0x003E) }, /* Linksys AE6000 */
+ { USB_DEVICE(0x0E8D, 0x7610) }, /* Sabrent NTWLAC */
+ { USB_DEVICE(0x7392, 0xa711) }, /* Edimax 7711mac */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax / Elecom */
+ { USB_DEVICE(0x148f, 0x761a) }, /* TP-Link TL-WDN5200 */
+ { USB_DEVICE(0x148f, 0x760a) }, /* TP-Link unknown */
+ { USB_DEVICE(0x0b05, 0x17d1) }, /* Asus USB-AC51 */
+ { USB_DEVICE(0x0b05, 0x17db) }, /* Asus USB-AC50 */
+ { USB_DEVICE(0x0df6, 0x0075) }, /* Sitecom WLA-3100 */
+ { USB_DEVICE(0x2019, 0xab31) }, /* Planex GW-450D */
+ { USB_DEVICE(0x2001, 0x3d02) }, /* D-LINK DWA-171 rev B1 */
+ { USB_DEVICE(0x0586, 0x3425) }, /* Zyxel NWD6505 */
+ { USB_DEVICE(0x07b8, 0x7610) }, /* AboCom AU7212 */
+ { USB_DEVICE(0x04bb, 0x0951) }, /* I-O DATA WN-AC433UK */
+ { USB_DEVICE(0x057c, 0x8502) }, /* AVM FRITZ!WLAN USB Stick AC 430 */
+ { USB_DEVICE(0x293c, 0x5702) }, /* Comcast Xfinity KXW02AAA */
+ { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */
+ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
+ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */
+ { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
+ { 0, }
+};
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ buf->len = len;
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+ return !buf->urb || !buf->buf;
+}
+
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+
+ usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+ usb_free_urb(buf->urb);
+}
+
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ unsigned pipe;
+ int ret;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
+ else
+ pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);
+
+ usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+ buf->urb->transfer_dma = buf->dma;
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
+ ret = usb_submit_urb(buf->urb, gfp);
+ if (ret)
+ dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+ dir, ep_idx, ret);
+ return ret;
+}
+
+void mt76x0_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int i, ret;
+ struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ const unsigned int pipe = (direction == USB_DIR_IN) ?
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ ret = usb_control_msg(usb_dev, pipe, req, req_type,
+ val, offset, buf, buflen,
+ MT_VEND_REQ_TOUT_MS);
+ trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
+ buf, buflen, ret);
+
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+
+ msleep(5);
+ }
+
+ dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+
+ return ret;
+}
+
+void mt76x0_vendor_reset(struct mt76x0_dev *dev)
+{
+ mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+ u32 val = ~0;
+
+ WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_READ, USB_DIR_IN,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ if (ret == MT_VEND_BUF)
+ val = get_unaligned_le32(mdev->data);
+ else if (ret > 0)
+ dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+ ret, offset);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ trace_mt76x0_reg_read(dev, offset, val);
+ return val;
+}
+
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ struct mt76x0_dev *mdev = dev;
+ int ret;
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val & 0xffff, offset, NULL, 0);
+ if (!ret)
+ ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
+ int ret;
+
+ WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+ mutex_lock(&mdev->usb_ctrl_mtx);
+
+ put_unaligned_le32(val, mdev->data);
+ ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
+ 0, offset, mdev->data, MT_VEND_BUF);
+ trace_mt76x0_reg_write(dev, offset, val);
+
+ mutex_unlock(&mdev->usb_ctrl_mtx);
+}
+
+static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt76x0_rr(dev, offset) & ~mask;
+ mt76x0_wr(dev, offset, val);
+ return val;
+}
+
+static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+ WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+ mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
+}
+
+void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
+{
+ mt76_wr(dev, offset, get_unaligned_le32(addr));
+ mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
+ struct mt76x0_dev *dev)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+ unsigned i, ep_i = 0, ep_o = 0;
+
+ BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
+ BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ ep_i++ < __MT_EP_IN_MAX) {
+ dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
+ dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+ /* Note: this is ignored by usb sub-system but vendor
+ * code does it. We can drop this at some point.
+ */
+ dev->in_ep[ep_i - 1] |= USB_DIR_IN;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ ep_o++ < __MT_EP_OUT_MAX) {
+ dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
+ dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+ }
+ }
+
+ if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+ dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
+ ep_i, ep_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mt76x0_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt76x0_dev *dev;
+ u32 asic_rev, mac_rev;
+ int ret;
+ static const struct mt76_bus_ops usb_ops = {
+ .rr = mt76x0_rr,
+ .wr = mt76x0_wr,
+ .rmw = mt76x0_rmw,
+ .copy = mt76x0_wr_copy,
+ };
+
+ dev = mt76x0_alloc_device(&usb_intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ dev->mt76.bus = &usb_ops;
+
+ ret = mt76x0_assign_pipes(usb_intf, dev);
+ if (ret)
+ goto err;
+
+ /* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
+ mt76x0_chip_onoff(dev, false, false);
+
+ ret = mt76x0_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt76_rr(dev, MT_MAC_CSR0);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
+ asic_rev, mac_rev);
+
+ /* Note: vendor driver skips this check for MT76X0U */
+ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ goto err;
+
+ ret = mt76x0_register_device(dev);
+ if (ret)
+ goto err_hw;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+err_hw:
+ mt76x0_cleanup(dev);
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+ return ret;
+}
+
+static void mt76x0_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ if (!initialized)
+ return;
+
+ ieee80211_unregister_hw(dev->mt76.hw);
+ mt76x0_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->mt76.hw);
+}
+
+static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt76x0_cleanup(dev);
+
+ return 0;
+}
+
+static int mt76x0_resume(struct usb_interface *usb_intf)
+{
+ struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ int ret;
+
+ ret = mt76x0_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt76x0_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x0_device_table,
+ .probe = mt76x0_probe,
+ .disconnect = mt76x0_disconnect,
+ .suspend = mt76x0_suspend,
+ .resume = mt76x0_resume,
+ .reset_resume = mt76x0_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x0_driver);
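
mt76x0_vendor_request() above wraps usb_control_msg() in a bounded retry loop that gives up immediately on a fatal error (-ENODEV, device gone) but retries transient failures. A compact userspace sketch of the same policy, with fake_control_msg() as a made-up stand-in for the USB transfer:

#include <stdio.h>
#include <errno.h>

#define MAX_RETRY 10

/* Hypothetical transfer that fails transiently; stands in for
 * usb_control_msg() in mt76x0_vendor_request() above.
 */
static int fake_control_msg(int attempt)
{
	return attempt < 2 ? -EAGAIN : 4;	/* succeed on 3rd try */
}

/* Bail out right away on a fatal error, otherwise retry a bounded
 * number of times before reporting failure.
 */
static int vendor_request_demo(void)
{
	int i, ret = -EIO;

	for (i = 0; i < MAX_RETRY; i++) {
		ret = fake_control_msg(i);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
	}
	return ret;
}

int main(void)
{
	printf("vendor request returned %d\n", vendor_request_demo());
	return 0;
}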
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
new file mode 100644
index 000000000000..492e431390a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76X0U_USB_H
+#define __MT76X0U_USB_H
+
+#include "mt76x0.h"
+
+#define MT7610_FIRMWARE "mediatek/mt7610u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+#define MT_VEND_BUF sizeof(__le32)
+
+static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
+{
+ return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
+}
+
+static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
+{
+ return interface_to_usbdev(to_usb_interface(mt76->dev));
+}
+
+static inline bool mt76x0_urb_has_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN;
+}
+
+bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
+ struct mt76x0_dma_buf *buf);
+void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
+int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
+ struct mt76x0_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+void mt76x0_complete_urb(struct urb *urb);
+
+int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen);
+void mt76x0_vendor_reset(struct mt76x0_dev *dev);
+int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+
+#endif
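
mt76x0_urb_has_error() above treats -ENOENT, -ECONNRESET and -ESHUTDOWN as benign because those statuses are reported when an URB is deliberately unlinked, e.g. on driver teardown or device disconnect; only other non-zero statuses are real failures. A standalone illustration of that classification:

#include <stdio.h>
#include <errno.h>

/* Mirror of mt76x0_urb_has_error(): unlink/disconnect statuses are
 * expected and therefore not counted as errors.
 */
static int urb_status_is_error(int status)
{
	return status && status != -ENOENT &&
	       status != -ECONNRESET && status != -ESHUTDOWN;
}

int main(void)
{
	int samples[] = { 0, -ENOENT, -EPROTO, -ESHUTDOWN, -ETIMEDOUT };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("status %4d -> %s\n", samples[i],
		       urb_status_is_error(samples[i]) ? "error" : "benign");
	return 0;
}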
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
new file mode 100644
index 000000000000..7856dd760419
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt76x0.h"
+
+void mt76x0_remove_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ memmove(skb->data + 2, skb->data, len);
+ skb_pull(skb, 2);
+}
+
+int mt76x0_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+ int ret;
+
+ if (len % 4 == 0)
+ return 0;
+
+ ret = skb_cow(skb, 2);
+ if (ret)
+ return ret;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 0;
+}
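
The two helpers above slide a 2-byte pad in and out between the 802.11 header and the payload whenever the header length is not a multiple of 4, keeping the payload 4-byte aligned for the hardware. A simplified flat-buffer sketch of the insert path (plain arrays instead of skbs; the frame contents are made up):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for mt76x0_insert_hdr_pad(): when hdr_len is
 * not a multiple of 4, two zero bytes are inserted between header and
 * payload. The driver does this in-place on an skb with headroom; a
 * flat buffer with spare room at the end gives the same layout.
 */
static int insert_hdr_pad(char *buf, int used, int hdr_len)
{
	if (hdr_len % 4 == 0)
		return used;

	memmove(buf + hdr_len + 2, buf + hdr_len, used - hdr_len);
	buf[hdr_len] = 0;
	buf[hdr_len + 1] = 0;
	return used + 2;
}

int main(void)
{
	char frame[64] = "HDRHDRHDRHDRHDRHDRHDRHDRHDpayload";
	int hdr_len = 26;	/* QoS data header: 26 bytes, not 4-aligned */
	int used = (int)strlen(frame);

	used = insert_hdr_pad(frame, used, hdr_len);
	printf("padded frame is %d bytes, payload at offset %d\n",
	       used, hdr_len + 2);
	return 0;
}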
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index 71fcfa44fb2e..dca3209bf5f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -33,6 +33,9 @@
#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
#define MT7662_EEPROM_SIZE 512
+#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
+
#define MT76x2_RX_RING_SIZE 256
#define MT_RX_HEADROOM 32
@@ -55,6 +58,7 @@ struct mt76x2_mcu {
wait_queue_head_t wait;
struct sk_buff_head res_q;
+ struct mt76u_buf res_u;
u32 msg_seq;
};
@@ -159,6 +163,23 @@ struct mt76x2_sta {
int inactive_count;
};
+static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ switch (mt76_rr(dev, MT_MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+ return false;
+}
+
static inline bool is_mt7612(struct mt76x2_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612;
@@ -166,6 +187,14 @@ static inline bool is_mt7612(struct mt76x2_dev *dev)
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
{
mt76x2_set_irq_mask(dev, 0, mask);
@@ -176,11 +205,29 @@ static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
mt76x2_set_irq_mask(dev, mask, 0);
}
+static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
+{
+ return mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 100);
+}
+
+static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
extern const struct ieee80211_ops mt76x2_ops;
+extern struct ieee80211_rate mt76x2_rates[12];
+
struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x2_dev *dev);
void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+void mt76x2_init_device(struct mt76x2_dev *dev);
irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
void mt76x2_phy_power_on(struct mt76x2_dev *dev);
@@ -194,7 +241,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
int mt76x2_phy_start(struct mt76x2_dev *dev);
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
struct cfg80211_chan_def *chandef);
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
void mt76x2_phy_calibrate(struct work_struct *work);
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
@@ -222,6 +269,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
u32 *tx_info);
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
void mt76x2_pre_tbtt_tasklet(unsigned long arg);
@@ -238,4 +286,45 @@ s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+int mt76x2_insert_hdr_pad(struct sk_buff *skb);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat);
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update);
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x2_dev *dev);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw);
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
+
#endif
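
mt76x2_wait_for_mac() above relies on MT_MAC_CSR0 reading as all-zeros or all-ones while the MAC is still in reset or the bus is dead, and as a chip ID otherwise. A userspace sketch of that bounded-poll idiom (fake_read() and the sample CSR0 value are invented for the demo):

#include <stdio.h>

/* Stand-in for mt76_rr(dev, MT_MAC_CSR0): all-ones until the MAC
 * comes up, then a plausible chip-ID value.
 */
static unsigned int fake_read(int attempt)
{
	return attempt < 3 ? 0xffffffff : 0x76120044;
}

static int wait_for_mac(void)
{
	for (int i = 0; i < 500; i++) {
		unsigned int val = fake_read(i);

		if (val != 0 && val != 0xffffffff)
			return 1;	/* MAC responded */
		/* the driver sleeps 5-10 ms here via usleep_range() */
	}
	return 0;
}

int main(void)
{
	printf("MAC %s\n", wait_for_mac() ? "ready" : "timed out");
	return 0;
}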
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
new file mode 100644
index 000000000000..a2338ba139b4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ if (txq->sta) {
+ struct mt76x2_sta *sta;
+
+ sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+ mtxq->wcid = &sta->wcid;
+ } else {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+ }
+
+ mt76_txq_init(&dev->mt76, txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_txq_init);
+
+int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
+
+int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76x2_txq_init(dev, sta->txq[i]);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ ewma_signal_init(&msta->rssi);
+
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_add);
+
+int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_wcid_free(dev->wcid_mask, idx);
+ mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
+
+void mt76x2_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76_txq_remove(&dev->mt76, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
+
+int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x2_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK, fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = true;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x2_set_key);
+
+int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue].hw_idx;
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
+
+void mt76x2_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
+
+void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+ msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x2_rxwi));
+ if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(&dev->mt76, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
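
The MT76_FILTER() macro in mt76x2_configure_filter() above inverts mac80211's semantics: each FIF_* "pass this" flag clears a hardware "drop this" bit, since MT_RX_FILTR_CFG lists frame classes to discard rather than to accept. A single-flag userspace sketch of that inversion (the FIF_/HW_ constants are re-defined locally for the demo):

#include <stdio.h>

#define FIF_FCSFAIL	(1u << 0)	/* mac80211: pass frames with bad FCS */
#define HW_DROP_CRC_ERR	(1u << 0)	/* hardware: drop frames with bad CRC */

/* Expanded form of one MT76_FILTER(FCSFAIL, ...) invocation. */
static unsigned int apply_filter(unsigned int total_flags,
				 unsigned int rxfilter)
{
	unsigned int flags = 0;

	flags |= total_flags & FIF_FCSFAIL;
	rxfilter &= ~HW_DROP_CRC_ERR;
	rxfilter |= !(flags & FIF_FCSFAIL) * HW_DROP_CRC_ERR;
	return rxfilter;
}

int main(void)
{
	printf("pass FCS failures: rxfilter=%#x\n",
	       apply_filter(FIF_FCSFAIL, 0xff));
	printf("drop FCS failures: rxfilter=%#x\n",
	       apply_filter(0, 0xff));
	return 0;
}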
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
index 74725902e6dc..77b5ff1be05f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -153,3 +153,4 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
}
+EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
index fd1ec4743e0b..6720a6a1313f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -66,27 +66,6 @@ mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
return 0;
}
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- void *rxwi = skb->data;
-
- if (q == MT_RXQ_MCU) {
- skb_queue_tail(&dev->mcu.res_q, skb);
- wake_up(&dev->mcu.wait);
- return;
- }
-
- skb_pull(skb, sizeof(struct mt76x2_rxwi));
- if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
- dev_kfree_skb(skb);
- return;
- }
-
- mt76_rx(&dev->mt76, q, skb);
-}
-
static int
mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
index e9d426bbf91a..da294558c268 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -19,34 +19,6 @@
#include "dma.h"
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_INFO_TX_BURST BIT(17)
-#define MT_TXD_INFO_80211 BIT(19)
-#define MT_TXD_INFO_TSO BIT(20)
-#define MT_TXD_INFO_CSO BIT(21)
-#define MT_TXD_INFO_WIV BIT(24)
-#define MT_TXD_INFO_QSEL GENMASK(26, 25)
-#define MT_TXD_INFO_DPORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
-#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
-#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
-#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
-#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
-#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
-#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
-
-/* MCU request message header */
-#define MT_MCU_MSG_LEN GENMASK(15, 0)
-#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
-#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
-#define MT_MCU_MSG_PORT GENMASK(29, 27)
-#define MT_MCU_MSG_TYPE GENMASK(31, 30)
-#define MT_MCU_MSG_TYPE_CMD BIT(30)
-
enum mt76x2_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -54,14 +26,4 @@ enum mt76x2_qsel {
MT_QSEL_EDCA_2,
};
-enum dma_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
-};
-
#endif
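
The descriptor fields above are declared with GENMASK()/BIT() and accessed with the kernel's FIELD_PREP()/FIELD_GET() helpers throughout this series. A minimal standalone sketch of that idiom, reusing two of the relocated field names; the demo_* functions are illustrative and not part of the patch:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define MT_TXD_INFO_LEN		GENMASK(15, 0)
#define MT_TXD_INFO_QSEL	GENMASK(26, 25)

/* Pack a frame length and queue selector into a TXD info word. */
static u32 demo_pack_txd_info(u16 len, u8 qsel)
{
	return FIELD_PREP(MT_TXD_INFO_LEN, len) |
	       FIELD_PREP(MT_TXD_INFO_QSEL, qsel);
}

/* Recover the queue selector from a packed info word. */
static u8 demo_txd_info_qsel(u32 info)
{
	return FIELD_GET(MT_TXD_INFO_QSEL, info);
}
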
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
index 95d5f7d888f0..1753bcb36356 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -40,8 +40,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
return 0;
}
-static void
-mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
{
u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
@@ -58,6 +57,7 @@ mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
break;
}
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
static int
mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
@@ -415,6 +415,7 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
static s8
mt76x2_rate_power_val(u8 val)
@@ -482,6 +483,7 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
val >>= 8;
t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
{
@@ -493,6 +495,7 @@ int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
return ret;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
static void
mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
@@ -600,6 +603,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
t->delta_bw40 = mt76x2_rate_power_val(bw40);
t->delta_bw80 = mt76x2_rate_power_val(bw80);
}
+EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
{
@@ -632,6 +636,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
{
@@ -642,6 +647,7 @@ bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
else
return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
}
+EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
int mt76x2_eeprom_init(struct mt76x2_dev *dev)
{
@@ -658,3 +664,6 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
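
Several of the gain fields these exported helpers read are narrow two's-complement EEPROM values — see the mt76x2_sign_extend() call on the LNA gain above. A minimal sketch of the sign-extension idiom, assuming the standard form; the body below is an illustration, not copied from the driver:

#include <linux/bits.h>
#include <linux/types.h>

/* Extend an n-bit two's-complement field to a full int. */
static inline int demo_sign_extend(u32 val, unsigned int size)
{
	bool sign = val & BIT(size - 1);

	val &= BIT(size - 1) - 1;

	return sign ? val - BIT(size - 1) : val;
}
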
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
index aa0b0c040375..0f3e4d2f4fee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -155,6 +155,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
static inline bool
mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 79ab93613e06..b814391f79ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -19,39 +19,6 @@
#include "mt76x2_eeprom.h"
#include "mt76x2_mcu.h"
-struct mt76x2_reg_pair {
- u32 reg;
- u32 value;
-};
-
-static bool
-mt76x2_wait_for_mac(struct mt76x2_dev *dev)
-{
- int i;
-
- for (i = 0; i < 500; i++) {
- switch (mt76_rr(dev, MT_MAC_CSR0)) {
- case 0:
- case ~0:
- break;
- default:
- return true;
- }
- usleep_range(5000, 10000);
- }
-
- return false;
-}
-
-static bool
-wait_for_wpdma(struct mt76x2_dev *dev)
-{
- return mt76_poll(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
- 0, 1000);
-}
-
static void
mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
{
@@ -71,107 +38,6 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
}
static void
-mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
- const struct mt76x2_reg_pair *data, int len)
-{
- while (len > 0) {
- mt76_wr(dev, data->reg, data->value);
- len--;
- data++;
- }
-}
-
-static void
-mt76_write_mac_initvals(struct mt76x2_dev *dev)
-{
-#define DEFAULT_PROT_CFG \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
- MT_PROT_CFG_RTS_THRESH)
-
-#define DEFAULT_PROT_CFG_20 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
-
-#define DEFAULT_PROT_CFG_40 \
- (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
- FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
- FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
- FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
-
- static const struct mt76x2_reg_pair vals[] = {
- /* Copied from MediaTek reference source */
- { MT_PBF_SYS_CTRL, 0x00080c00 },
- { MT_PBF_CFG, 0x1efebcff },
- { MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_MAC_SYS_CTRL, 0x0000000c },
- { MT_MAX_LEN_CFG, 0x003e3f00 },
- { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
- { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
- { MT_XIFS_TIME_CFG, 0x33a40d0a },
- { MT_BKOFF_SLOT_CFG, 0x00000209 },
- { MT_TBTT_SYNC_CFG, 0x00422010 },
- { MT_PWR_PIN_CFG, 0x00000000 },
- { 0x1238, 0x001700c8 },
- { MT_TX_SW_CFG0, 0x00101001 },
- { MT_TX_SW_CFG1, 0x00010000 },
- { MT_TX_SW_CFG2, 0x00000000 },
- { MT_TXOP_CTRL_CFG, 0x0400583f },
- { MT_TX_RTS_CFG, 0x00100020 },
- { MT_TX_TIMEOUT_CFG, 0x000a2290 },
- { MT_TX_RETRY_CFG, 0x47f01f0f },
- { MT_EXP_ACK_TIME, 0x002c00dc },
- { MT_TX_PROT_CFG6, 0xe3f42004 },
- { MT_TX_PROT_CFG7, 0xe3f42084 },
- { MT_TX_PROT_CFG8, 0xe3f42104 },
- { MT_PIFS_TX_CFG, 0x00060fff },
- { MT_RX_FILTR_CFG, 0x00015f97 },
- { MT_LEGACY_BASIC_RATE, 0x0000017f },
- { MT_HT_BASIC_RATE, 0x00004003 },
- { MT_PN_PAD_MODE, 0x00000003 },
- { MT_TXOP_HLDR_ET, 0x00000002 },
- { 0xa44, 0x00000000 },
- { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
- { MT_TSO_CTRL, 0x00000000 },
- { MT_AUX_CLK_CFG, 0x00000000 },
- { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
- { MT_TX_ALC_CFG_4, 0x00000000 },
- { MT_TX_ALC_VGA3, 0x00000000 },
- { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
- { MT_TX_PWR_CFG_8, 0x0000003a },
- { MT_TX_PWR_CFG_9, 0x0000003a },
- { MT_EFUSE_CTRL, 0x0000d000 },
- { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
- { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
- { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
- { MT_TX_SW_CFG3, 0x00000004 },
- { MT_HT_FBK_TO_LEGACY, 0x00001818 },
- { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
- { MT_PROT_AUTO_TX_CFG, 0x00830083 },
- { MT_HT_CTRL_CFG, 0x000001ff },
- };
- struct mt76x2_reg_pair prot_vals[] = {
- { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG },
- { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
- { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
- };
-
- mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
- mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
-}
-
-static void
mt76x2_fixup_xtal(struct mt76x2_dev *dev)
{
u16 eep_val;
@@ -360,41 +226,6 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
- bool stopped = false;
- u32 rts_cfg;
- int i;
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
- rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
- /* Wait for MAC to become idle */
- for (i = 0; i < 300; i++) {
- if ((mt76_rr(dev, MT_MAC_STATUS) &
- (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
- mt76_rr(dev, MT_BBP(IBI, 12))) {
- udelay(1);
- continue;
- }
-
- stopped = true;
- break;
- }
-
- if (force && !stopped) {
- mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
- mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
- }
-
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-
void mt76x2_mac_resume(struct mt76x2_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
@@ -498,45 +329,6 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
MT_TX_TIMEOUT_CFG_ACKTO, ackto);
}
-static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
-{
- u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- if (enable)
- val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
- else
- val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
- MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-}
-
-static void
-mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
-{
- u32 val;
-
- val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
-
- val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
-
- if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
- val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
- }
-
- mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
- udelay(20);
-
- mt76x2_set_wlan_state(dev, enable);
-}
-
int mt76x2_init_hardware(struct mt76x2_dev *dev)
{
static const u16 beacon_offsets[16] = {
@@ -567,11 +359,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
(unsigned long) dev);
- dev->chainmask = 0x202;
- dev->global_wcid.idx = 255;
- dev->global_wcid.hw_key_idx = -1;
- dev->slottime = 9;
-
val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
MT_WPDMA_GLO_CFG_BIG_ENDIAN |
@@ -663,34 +450,6 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
mt76x2_dfs_set_domain(dev, request->dfs_region);
}
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
-}
-
-static struct ieee80211_rate mt76x2_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(0, 60),
- OFDM_RATE(1, 90),
- OFDM_RATE(2, 120),
- OFDM_RATE(3, 180),
- OFDM_RATE(4, 240),
- OFDM_RATE(5, 360),
- OFDM_RATE(6, 480),
- OFDM_RATE(7, 540),
-};
-
static const struct ieee80211_iface_limit if_limits[] = {
{
.max = 1,
@@ -767,37 +526,6 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
mt76x2_led_set_config(mt76, 0xff, 0);
}
-static void
-mt76x2_init_txpower(struct mt76x2_dev *dev,
- struct ieee80211_supported_band *sband)
-{
- struct ieee80211_channel *chan;
- struct mt76x2_tx_power_info txp;
- struct mt76_rate_power t = {};
- int target_power;
- int i;
-
- for (i = 0; i < sband->n_channels; i++) {
- chan = &sband->channels[i];
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- target_power = max_t(int, (txp.chain[0].target_power +
- txp.chain[0].delta),
- (txp.chain[1].target_power +
- txp.chain[1].delta));
-
- mt76x2_get_rate_power(dev, &t, chan);
-
- chan->max_power = mt76x2_get_max_rate_power(&t) +
- target_power;
- chan->max_power /= 2;
-
- /* convert to combined output power on 2x2 devices */
- chan->max_power += 3;
- }
-}
-
int mt76x2_register_device(struct mt76x2_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -812,20 +540,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
return -ENOMEM;
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+ mt76x2_init_device(dev);
ret = mt76x2_init_hardware(dev);
if (ret)
return ret;
- hw->queues = 4;
- hw->max_rates = 1;
- hw->max_report_rates = 7;
- hw->max_rate_tries = 1;
- hw->extra_tx_headroom = 2;
-
- hw->sta_data_size = sizeof(struct mt76x2_sta);
- hw->vif_data_size = sizeof(struct mt76x2_vif);
-
for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
u8 *addr = dev->macaddr_list[i].addr;
@@ -845,16 +568,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
wiphy->reg_notifier = mt76x2_regd_notifier;
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
- ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
- INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
mt76x2_dfs_init_detector(dev);
@@ -862,9 +584,6 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
- /* init antenna configuration */
- dev->mt76.antenna_mask = 3;
-
ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
ARRAY_SIZE(mt76x2_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
new file mode 100644
index 000000000000..324b2a4b8b67
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+struct ieee80211_rate mt76x2_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x2_rates);
+
+struct mt76x2_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+}
+
+void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
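+ /*
+ * If the WLAN core is already enabled, pulse the RF reset and
+ * give it 20us to settle before applying the requested state.
+ */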
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x2_set_wlan_state(dev, enable);
+}
+EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+ const struct mt76x2_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_wr(dev, data->reg, data->value);
+ len--;
+ data++;
+ }
+}
+
+void mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG_CCK \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_OFDM \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+ static const struct mt76x2_reg_pair vals[] = {
+ /* Copied from MediaTek reference source */
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x1efebcff },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAX_LEN_CFG, 0x003e3f00 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
+ { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
+ { MT_XIFS_TIME_CFG, 0x33a40d0a },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TBTT_SYNC_CFG, 0x00422010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { 0x1238, 0x001700c8 },
+ { MT_TX_SW_CFG0, 0x00101001 },
+ { MT_TX_SW_CFG1, 0x00010000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_TXOP_CTRL_CFG, 0x0400583f },
+ { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2290 },
+ { MT_TX_RETRY_CFG, 0x47f01f0f },
+ { MT_EXP_ACK_TIME, 0x002c00dc },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_PIFS_TX_CFG, 0x00060fff },
+ { MT_RX_FILTR_CFG, 0x00015f97 },
+ { MT_LEGACY_BASIC_RATE, 0x0000017f },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_AUX_CLK_CFG, 0x00000000 },
+ { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
+ { MT_TX_ALC_CFG_4, 0x00000000 },
+ { MT_TX_ALC_VGA3, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_8, 0x0000003a },
+ { MT_TX_PWR_CFG_9, 0x0000003a },
+ { MT_EFUSE_CTRL, 0x0000d000 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
+ { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
+ { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
+ { MT_TX_SW_CFG3, 0x00000004 },
+ { MT_HT_FBK_TO_LEGACY, 0x00001818 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+ { MT_PROT_AUTO_TX_CFG, 0x00830083 },
+ { MT_HT_CTRL_CFG, 0x000001ff },
+ };
+ struct mt76x2_reg_pair prot_vals[] = {
+ { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
+ { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM },
+ { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ };
+
+ mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+ mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
+
+void mt76x2_init_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ hw->sta_data_size = sizeof(struct mt76x2_sta);
+ hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ dev->chainmask = 0x202;
+ dev->global_wcid.idx = 255;
+ dev->global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ /* init antenna configuration */
+ dev->mt76.antenna_mask = 3;
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_device);
+
+void mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76_rate_power t = {};
+ int target_power;
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ target_power = max_t(int, (txp.chain[0].target_power +
+ txp.chain[0].delta),
+ (txp.chain[1].target_power +
+ txp.chain[1].delta));
+
+ mt76x2_get_rate_power(dev, &t, chan);
+
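+ /*
+ * Rate and target powers are expressed in .5 dB units, hence
+ * the halving below to get dBm.
+ */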
+ chan->max_power = mt76x2_get_max_rate_power(&t) +
+ target_power;
+ chan->max_power /= 2;
+
+ /* convert to combined output power on 2x2 devices */
+ chan->max_power += 3;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
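
A minimal usage sketch of the shared init helpers this file now exports, in the order a bus-specific probe might call them. demo_probe() is hypothetical and the call order is a plausible reading, not taken from the patch:

/* Sketch: how bus glue (PCIe or USB) could consume the common code. */
static int demo_probe(struct mt76x2_dev *dev)
{
	mt76x2_reset_wlan(dev, true);		/* power up the WLAN core */
	mt76x2_init_device(dev);		/* common mac80211/hw defaults */
	mt76_write_mac_initvals(dev);		/* MAC register defaults */
	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
	return 0;
}
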
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index fc9af79b3e69..23cf437d14f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -28,515 +28,12 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
get_unaligned_le16(addr + 4));
}
-static int
-mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (idx >= 8)
- idx = 0;
-
- if (status->band == NL80211_BAND_2GHZ)
- idx += 4;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8) {
- idx -= 8;
- status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- }
-
- if (idx >= 4)
- idx = 0;
-
- status->rate_idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- status->rate_idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- status->encoding = RX_ENC_VHT;
- status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
- status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_LDPC)
- status->enc_flags |= RX_ENC_FLAG_LDPC;
-
- if (rate & MT_RXWI_RATE_SGI)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- if (rate & MT_RXWI_RATE_STBC)
- status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- status->bw = RATE_INFO_BW_40;
- break;
- case MT_PHY_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static __le16
-mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
- u16 rateval;
- u8 phy, rate_idx;
- u8 nss = 1;
- u8 bw = 0;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 4);
- phy = MT_PHY_TYPE_VHT;
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- bw = 2;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 3);
- phy = MT_PHY_TYPE_HT;
- if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
- phy = MT_PHY_TYPE_HT_GF;
- if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else {
- const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
- u16 val;
-
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- val = r->hw_value_short;
- else
- val = r->hw_value;
-
- phy = val >> 8;
- rate_idx = val & 0xff;
- bw = 0;
- }
-
- rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
- rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
- rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rateval |= MT_RXWI_RATE_SGI;
-
- *nss_val = nss;
- return cpu_to_le16(rateval);
-}
-
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
-{
- u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
- u32 bit = MT_WCID_DROP_MASK(idx);
-
- /* prevent unnecessary writes */
- if ((val & bit) != (bit * drop))
- mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
-}
-
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
-{
- spin_lock_bh(&dev->mt76.lock);
- wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
- wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->mt76.lock);
-}
-
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
- u16 txwi_flags = 0;
- u8 nss;
- s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8];
-
- memset(txwi, 0, sizeof(*txwi));
-
- if (wcid)
- txwi->wcid = wcid->idx;
- else
- txwi->wcid = 0xff;
-
- txwi->pktid = 1;
-
- if (wcid && wcid->sw_iv && key) {
- u64 pn = atomic64_inc_return(&key->tx_pn);
- ccmp_pn[0] = pn;
- ccmp_pn[1] = pn >> 8;
- ccmp_pn[2] = 0;
- ccmp_pn[3] = 0x20 | (key->keyidx << 6);
- ccmp_pn[4] = pn >> 16;
- ccmp_pn[5] = pn >> 24;
- ccmp_pn[6] = pn >> 32;
- ccmp_pn[7] = pn >> 40;
- txwi->iv = *((__le32 *)&ccmp_pn[0]);
- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
- }
-
- spin_lock_bh(&dev->mt76.lock);
- if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
- } else {
- txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
- max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
- }
- spin_unlock_bh(&dev->mt76.lock);
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
- max_txpwr_adj);
- txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E4)
- txwi->txstream = 0x13;
- else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
- !(txwi->rate & cpu_to_le16(rate_ht_mask)))
- txwi->txstream = 0x93;
-
- if (info->flags & IEEE80211_TX_CTL_LDPC)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
- if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
- txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- txwi_flags |= MT_TXWI_FLAGS_MMPS;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- txwi->pktid |= MT_TXWI_PKTID_PROBE;
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
- u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
- ba_size <<= sta->ht_cap.ampdu_factor;
- ba_size = min_t(int, 63, ba_size - 1);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- ba_size = 0;
- txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
-
- txwi_flags |= MT_TXWI_FLAGS_AMPDU |
- FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
- }
-
- if (ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control))
- txwi_flags |= MT_TXWI_FLAGS_TS;
-
- txwi->flags |= cpu_to_le16(txwi_flags);
- txwi->len_ctl = cpu_to_le16(skb->len);
-}
-
-static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
-{
- int hdrlen;
-
- if (!len)
- return;
-
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + len, skb->data, hdrlen);
- skb_pull(skb, len);
-}
-
-static struct mt76x2_sta *
-mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
-{
- struct mt76_wcid *wcid;
-
- if (idx >= ARRAY_SIZE(dev->wcid))
- return NULL;
-
- wcid = rcu_dereference(dev->wcid[idx]);
- if (!wcid)
- return NULL;
-
- return container_of(wcid, struct mt76x2_sta, wcid);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, bool unicast)
-{
- if (!sta)
- return NULL;
-
- if (unicast)
- return &sta->wcid;
- else
- return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi)
-{
- struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
- struct mt76x2_rxwi *rxwi = rxi;
- struct mt76x2_sta *sta;
- u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
- u32 ctl = le32_to_cpu(rxwi->ctl);
- u16 rate = le16_to_cpu(rxwi->rate);
- u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
- bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0;
- u8 pn_len;
- u8 wcid;
- int len;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
-
- if (rxinfo & MT_RXINFO_L2PAD)
- pad_len += 2;
-
- if (rxinfo & MT_RXINFO_DECRYPT) {
- status->flag |= RX_FLAG_DECRYPTED;
- status->flag |= RX_FLAG_MMIC_STRIPPED;
- status->flag |= RX_FLAG_MIC_STRIPPED;
- status->flag |= RX_FLAG_IV_STRIPPED;
- }
-
- wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
- sta = mt76x2_rx_get_sta(dev, wcid);
- status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
-
- len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
- pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
- if (pn_len) {
- int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
- u8 *data = skb->data + offset;
-
- status->iv[0] = data[7];
- status->iv[1] = data[6];
- status->iv[2] = data[5];
- status->iv[3] = data[4];
- status->iv[4] = data[1];
- status->iv[5] = data[0];
-
- /*
- * Driver CCMP validation can't deal with fragments.
- * Let mac80211 take care of it.
- */
- if (rxinfo & MT_RXINFO_FRAG) {
- status->flag &= ~RX_FLAG_IV_STRIPPED;
- } else {
- pad_len += pn_len << 2;
- len -= pn_len << 2;
- }
- }
-
- mt76x2_remove_hdr_pad(skb, pad_len);
-
- if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
- status->aggr = true;
-
- if (WARN_ON_ONCE(len > skb->len))
- return -EINVAL;
-
- pskb_trim(skb, len);
- status->chains = BIT(0) | BIT(1);
- status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
- status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
- status->signal = max(status->chain_signal[0], status->chain_signal[1]);
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
-
- status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
- status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
-
- if (sta) {
- ewma_signal_add(&sta->rssi, status->signal);
- sta->inactive_count = 0;
- }
-
- return mt76x2_mac_process_rate(status, rate);
-}
-
-static int
-mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
- enum nl80211_band band)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- txrate->idx = 0;
- txrate->flags = 0;
- txrate->count = 1;
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (band == NL80211_BAND_2GHZ)
- idx += 4;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8)
- idx -= 8;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
- case MT_PHY_TYPE_HT:
- txrate->flags |= IEEE80211_TX_RC_MCS;
- txrate->idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
- txrate->idx = idx;
- break;
- default:
- return -EINVAL;
- }
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- break;
- case MT_PHY_BW_80:
- txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_SGI)
- txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-
- return 0;
-}
-
-static void
-mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
- struct ieee80211_tx_info *info,
- struct mt76x2_tx_status *st, int n_frames)
-{
- struct ieee80211_tx_rate *rate = info->status.rates;
- int cur_idx, last_rate;
- int i;
-
- if (!n_frames)
- return;
-
- last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
- mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->mt76.chandef.chan->band);
- if (last_rate < IEEE80211_TX_MAX_RATES - 1)
- rate[last_rate + 1].idx = -1;
-
- cur_idx = rate[last_rate].idx + last_rate;
- for (i = 0; i <= last_rate; i++) {
- rate[i].flags = rate[last_rate].flags;
- rate[i].idx = max_t(int, 0, cur_idx - i);
- rate[i].count = 1;
- }
- rate[last_rate].count = st->retry + 1 - last_rate;
-
- info->status.ampdu_len = n_frames;
- info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
- if (st->pktid & MT_TXWI_PKTID_PROBE)
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
- if (st->aggr)
- info->flags |= IEEE80211_TX_CTL_AMPDU |
- IEEE80211_TX_STAT_AMPDU;
-
- if (!st->ack_req)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else if (st->success)
- info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-static void
-mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
- u8 *update)
-{
- struct ieee80211_tx_info info = {};
- struct ieee80211_sta *sta = NULL;
- struct mt76_wcid *wcid = NULL;
- struct mt76x2_sta *msta = NULL;
-
- rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
- if (wcid) {
- void *priv;
-
- priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
- sta = container_of(priv, struct ieee80211_sta,
- drv_priv);
- }
-
- if (msta && stat->aggr) {
- u32 stat_val, stat_cache;
-
- stat_val = stat->rate;
- stat_val |= ((u32) stat->retry) << 16;
- stat_cache = msta->status.rate;
- stat_cache |= ((u32) msta->status.retry) << 16;
-
- if (*update == 0 && stat_val == stat_cache &&
- stat->wcid == msta->status.wcid && msta->n_frames < 32) {
- msta->n_frames++;
- goto out;
- }
-
- mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
-
- msta->status = *stat;
- msta->n_frames = 1;
- *update = 0;
- } else {
- mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
- *update = 1;
- }
-
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
-
-out:
- rcu_read_unlock();
-}
-
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
struct mt76x2_tx_status stat = {};
unsigned long flags;
u8 update = 1;
+ bool ret;
if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
return;
@@ -544,26 +41,13 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
trace_mac_txstat_poll(dev);
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- u32 stat1, stat2;
-
spin_lock_irqsave(&dev->irq_lock, flags);
- stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
- if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
- spin_unlock_irqrestore(&dev->irq_lock, flags);
- break;
- }
-
+ ret = mt76x2_mac_load_tx_status(dev, &stat);
spin_unlock_irqrestore(&dev->irq_lock, flags);
- stat.valid = 1;
- stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
- stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
- stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
- stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
- stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
- stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
- stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+ if (!ret)
+ break;
+
trace_mac_txstat_fetch(dev, &stat);
if (!irq) {
@@ -612,104 +96,6 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
dev_kfree_skb_any(e->skb);
}
-static enum mt76x2_cipher_type
-mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
- memset(key_data, 0, 32);
- if (!key)
- return MT_CIPHER_NONE;
-
- if (key->keylen > 32)
- return MT_CIPHER_NONE;
-
- memcpy(key_data, key->key, key->keylen);
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
- default:
- return MT_CIPHER_NONE;
- }
-}
-
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
- struct mt76_wcid_addr addr = {};
- u32 attr;
-
- attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
- mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
- mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
-
- if (idx >= 128)
- return;
-
- if (mac)
- memcpy(addr.macaddr, mac, ETH_ALEN);
-
- mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
-}
-
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u8 iv_data[8];
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
- mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
- memset(iv_data, 0, sizeof(iv_data));
- if (key) {
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
- iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP)
- iv_data[3] |= 0x20;
- }
-
- mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
- return 0;
-}
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76x2_cipher_type cipher;
- u8 key_data[32];
- u32 val;
-
- cipher = mt76x2_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EOPNOTSUPP;
-
- val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
- val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
- val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
- mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
- mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
- sizeof(key_data));
-
- return 0;
-}
-
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
@@ -719,7 +105,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
return -ENOSPC;
- mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+ mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
@@ -854,3 +240,33 @@ void mt76x2_mac_work(struct work_struct *work)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
+
+void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
+{
+ u32 data = 0;
+
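+ /*
+ * val is the RTS threshold; ~0 means protection is disabled,
+ * so CTRL/RTS_THRESH get cleared in every protection register.
+ */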
+ if (val != ~0)
+ data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+ MT_PROT_CFG_RTS_THRESH;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+ mt76_rmw(dev, MT_CCK_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_OFDM_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG6,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG7,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG8,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
index c048cd06df6b..5af0107ba748 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -166,7 +166,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
void *rxi);
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+ struct ieee80211_sta *sta, int len);
void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
new file mode 100644
index 000000000000..6542644bc325
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ udelay(1);
+ continue;
+ }
+
+ stopped = true;
+ break;
+ }
+
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
+
+bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat)
+{
+ u32 stat1, stat2;
+
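+ /*
+ * Snapshot both FIFO words; the entry only counts if the
+ * VALID bit is set in the first one.
+ */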
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ if (!stat->valid)
+ return false;
+
+ stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x2_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+ rate[last_rate].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+void mt76x2_send_tx_status(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat, u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x2_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ if (msta && stat->aggr) {
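+ /*
+ * Batch identical A-MPDU statuses: keep counting frames
+ * until the rate/retry signature or WCID changes (or 32
+ * frames accumulate), then report them in one go.
+ */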
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP)
+ iv_data[3] |= 0x20;
+ }
+
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
+
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ spin_lock_bh(&dev->mt76.lock);
+ wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8];
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
+ if (wcid && wcid->sw_iv && key) {
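+ /*
+ * Software IV: assemble the 8-byte CCMP header (PN0, PN1,
+ * key index with the Ext IV bit, PN2..PN5) that the hardware
+ * would otherwise insert.
+ */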
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((__le32 *)&ccmp_pn[0]);
+ txwi->eiv = *((__le32 *)&ccmp_pn[1]);
+ }
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
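+ /*
+ * Use the caller-supplied length instead of skb->len, so the
+ * txwi can be built before bus-specific padding is applied.
+ */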
+ txwi->len_ctl = cpu_to_le16(len);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* skip the register write when the drop bit already matches */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
+
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
+
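+/*
+ * Convert a raw per-chain RSSI reading to dBm using the EEPROM
+ * offset and the LNA gain from the current channel calibration.
+ */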
+int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
+static struct mt76x2_sta *
+mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
+{
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (!wcid)
+ return NULL;
+
+ return container_of(wcid, struct mt76x2_sta, wcid);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
+ bool unicast)
+{
+ if (!sta)
+ return NULL;
+
+ if (unicast)
+ return &sta->wcid;
+ else
+ return &sta->vif->group_wcid;
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76x2_rxwi *rxwi = rxi;
+ struct mt76x2_sta *sta;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int pad_len = 0;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return -EINVAL;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ sta = mt76x2_rx_get_sta(dev, wcid);
+ status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x2_remove_hdr_pad(skb, pad_len);
+
+ if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
+ status->aggr = true;
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+ status->chains = BIT(0) | BIT(1);
+ status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
+ status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
+ status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ if (sta) {
+ ewma_signal_add(&sta->rssi, status->signal);
+ sta->inactive_count = 0;
+ }
+
+ return mt76x2_mac_process_rate(status, rate);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index 3c0ebe6d231c..680a89f8aa87 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -53,30 +53,6 @@ mt76x2_stop(struct ieee80211_hw *hw)
mutex_unlock(&dev->mutex);
}
-static void
-mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *) txq->drv_priv;
- if (txq->sta) {
- struct mt76x2_sta *sta;
-
- sta = (struct mt76x2_sta *) txq->sta->drv_priv;
- mtxq->wcid = &sta->wcid;
- } else {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
- }
-
- mt76_txq_init(&dev->mt76, txq);
-}
-
static int
mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -111,14 +87,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
-static void
-mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct mt76x2_dev *dev = hw->priv;
-
- mt76_txq_remove(&dev->mt76, vif->txq);
-}
-
static int
mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
{
@@ -194,39 +162,6 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
static void
-mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
-{
- struct mt76x2_dev *dev = hw->priv;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
-
- mutex_lock(&dev->mutex);
-
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
- MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
- MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
- MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
- MT_RX_FILTR_CFG_CTS |
- MT_RX_FILTR_CFG_CFEND |
- MT_RX_FILTR_CFG_CFACK |
- MT_RX_FILTR_CFG_BA |
- MT_RX_FILTR_CFG_CTRL_RSV);
- MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
- *total_flags = flags;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
- mutex_unlock(&dev->mutex);
-}
-
-static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
@@ -263,68 +198,6 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_unlock(&dev->mutex);
}
-static int
-mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- int ret = 0;
- int idx = 0;
- int i;
-
- mutex_lock(&dev->mutex);
-
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
-
- msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.hw_key_idx = -1;
- mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
- mt76x2_mac_wcid_set_drop(dev, idx, false);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76x2_txq_init(dev, sta->txq[i]);
-
- if (vif->type == NL80211_IFTYPE_AP)
- set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
-
- ewma_signal_init(&msta->rssi);
-
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-
-out:
- mutex_unlock(&dev->mutex);
-
- return ret;
-}
-
-static int
-mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- int idx = msta->wcid.idx;
- int i;
-
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(&dev->mt76, sta->txq[i]);
- mt76x2_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->wcid_mask, idx);
- mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mutex);
-
- return 0;
-}
-
void
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
@@ -336,117 +209,6 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mt76x2_mac_wcid_set_drop(dev, idx, ps);
}
-static int
-mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- struct mt76x2_sta *msta;
- struct mt76_wcid *wcid;
- int idx = key->keyidx;
- int ret;
-
- /* fall back to sw encryption for unsupported ciphers */
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- /*
- * The hardware does not support per-STA RX GTK, fall back
- * to software mode for these.
- */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
- key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
- wcid = msta ? &msta->wcid : &mvif->group_wcid;
-
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
- wcid->sw_iv = true;
- }
- } else {
- if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- wcid->sw_iv = true;
- }
-
- key = NULL;
- }
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
- if (!msta) {
- if (key || wcid->hw_key_idx == idx) {
- ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
- if (ret)
- return ret;
- }
-
- return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
- }
-
- return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-
-static int
-mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct mt76x2_dev *dev = hw->priv;
- u8 cw_min = 5, cw_max = 10, qid;
- u32 val;
-
- qid = dev->mt76.q_tx[queue].hw_idx;
-
- if (params->cw_min)
- cw_min = fls(params->cw_min);
- if (params->cw_max)
- cw_max = fls(params->cw_max);
-
- val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
- FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
- FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
- FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
-
- val = mt76_rr(dev, MT_WMM_TXOP(qid));
- val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
- val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
- mt76_wr(dev, MT_WMM_TXOP(qid), val);
-
- val = mt76_rr(dev, MT_WMM_AIFSN);
- val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
- val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_AIFSN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMIN);
- val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
- val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMIN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMAX);
- val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
- val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMAX, val);
-
- return 0;
-}
-
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
@@ -485,75 +247,6 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
return 0;
}
-static int
-mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
-{
- enum ieee80211_ampdu_mlme_action action = params->action;
- struct ieee80211_sta *sta = params->sta;
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_txq *txq = sta->txq[params->tid];
- u16 tid = params->tid;
- u16 *ssn = &params->ssn;
- struct mt76_txq *mtxq;
-
- if (!txq)
- return -EINVAL;
-
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
- mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
- BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- }
-
- return 0;
-}
-
-static void
-mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
- struct ieee80211_tx_rate rate = {};
-
- if (!rates)
- return;
-
- rate.idx = rates->rate[0].idx;
- rate.flags = rates->rate[0].flags;
- mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
- msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
-}
-
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
@@ -605,6 +298,21 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
return 0;
}
+static int
+mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
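+ /* ~0 is the cfg80211 convention for "RTS/CTS disabled"; any
+ * other value must fit the 16-bit protection threshold
+ */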
+ if (val != ~0 && val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&dev->mutex);
+ mt76x2_mac_set_tx_protection(dev, val);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x2_tx,
.start = mt76x2_start,
@@ -631,5 +339,6 @@ const struct ieee80211_ops mt76x2_ops = {
.set_tim = mt76x2_set_tim,
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76x2_get_antenna,
+ .set_rts_threshold = mt76x2_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
index dfd36d736b06..743da57760dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -23,23 +23,6 @@
#include "mt76x2_dma.h"
#include "mt76x2_eeprom.h"
-struct mt76x2_fw_header {
- __le32 ilm_len;
- __le32 dlm_len;
- __le16 build_ver;
- __le16 fw_ver;
- u8 pad[4];
- char build_time[16];
-};
-
-struct mt76x2_patch_header {
- char build_time[16];
- char platform[4];
- char hw_version[4];
- char patch_version[4];
- u8 pad[2];
-};
-
static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
index d7a7e83262ce..e40293f21417 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -146,6 +146,23 @@ struct mt76x2_tssi_comp {
u8 offset1;
} __packed __aligned(4);
+struct mt76x2_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x2_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
u32 param);
int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
index 20ffa6a40d39..84c96c0415b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -19,172 +19,6 @@
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
-static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain -= offset / 2;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
-}
-
-static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
-{
- s8 gain;
-
- gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
- gain += offset;
- mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
-}
-
-static void
-mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
-{
- s8 *gain_adj = dev->cal.rx.high_gain;
-
- mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
- mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
-
- mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
- mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
-}
-
-static u32
-mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
-{
- u32 val = 0;
-
- val |= (v1 & (BIT(6) - 1)) << 0;
- val |= (v2 & (BIT(6) - 1)) << 8;
- val |= (v3 & (BIT(6) - 1)) << 16;
- val |= (v4 & (BIT(6) - 1)) << 24;
- return val;
-}
-
-int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
-{
- struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
-
- rssi += cal->rssi_offset[chain];
- rssi -= cal->lna_gain;
-
- return rssi;
-}
-
-static void
-mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- r->all[i] += offset;
-}
-
-static void
-mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- if (r->all[i] > limit)
- r->all[i] = limit;
-}
-
-static int
-mt76x2_get_min_rate_power(struct mt76_rate_power *r)
-{
- int i;
- s8 ret = 0;
-
- for (i = 0; i < sizeof(r->all); i++) {
- if (!r->all[i])
- continue;
-
- if (ret)
- ret = min(ret, r->all[i]);
- else
- ret = r->all[i];
- }
-
- return ret;
-}
-
-
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
-{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- struct mt76x2_tx_power_info txp;
- int txp_0, txp_1, delta = 0;
- struct mt76_rate_power t = {};
- int base_power, gain;
-
- mt76x2_get_power_info(dev, &txp, chan);
-
- if (width == NL80211_CHAN_WIDTH_40)
- delta = txp.delta_bw40;
- else if (width == NL80211_CHAN_WIDTH_80)
- delta = txp.delta_bw80;
-
- mt76x2_get_rate_power(dev, &t, chan);
- mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
- mt76x2_limit_rate_power(&t, dev->txpower_conf);
- dev->txpower_cur = mt76x2_get_max_rate_power(&t);
-
- base_power = mt76x2_get_min_rate_power(&t);
- delta += base_power - txp.chain[0].target_power;
- txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
- txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
-
- gain = min(txp_0, txp_1);
- if (gain < 0) {
- base_power -= gain;
- txp_0 -= gain;
- txp_1 -= gain;
- } else if (gain > 0x2f) {
- base_power -= gain - 0x2f;
- txp_0 = 0x2f;
- txp_1 = 0x2f;
- }
-
- mt76x2_add_rate_power_offset(&t, -base_power);
- dev->target_power = txp.chain[0].target_power;
- dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
- dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
- dev->rate_power = t;
-
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
-
- mt76_wr(dev, MT_TX_PWR_CFG_0,
- mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_1,
- mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_2,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
- mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
- mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
- mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
- mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
-}
-
-static bool
-mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
- return ((chan->flags & IEEE80211_CHAN_RADAR) &&
- chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
static bool
mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
{
@@ -243,140 +77,6 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
dev->cal.channel_cal_done = true;
}
-static void
-mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
-{
- u32 pa_mode[2];
- u32 pa_mode_adj;
-
- if (band == NL80211_BAND_2GHZ) {
- pa_mode[0] = 0x010055ff;
- pa_mode[1] = 0x00550055;
-
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
- } else {
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
- }
- } else {
- pa_mode[0] = 0x0000ffff;
- pa_mode[1] = 0x00ff00ff;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
- } else {
- mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
- mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
- }
-
- if (mt76x2_ext_pa_enabled(dev, band))
- pa_mode_adj = 0x04000000;
- else
- pa_mode_adj = 0;
-
- mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
- mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
- }
-
- mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
- mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- u32 val;
-
- if (band == NL80211_BAND_2GHZ)
- val = 0x3c3c023c;
- else
- val = 0x363c023c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
- } else {
- if (band == NL80211_BAND_2GHZ) {
- u32 val = 0x0f3c3c3c;
-
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
- } else {
- mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
- mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
- }
- }
-}
-
-static void
-mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
-{
- u32 cfg0, cfg1;
-
- if (mt76x2_ext_pa_enabled(dev, band)) {
- cfg0 = bw ? 0x000b0c01 : 0x00101101;
- cfg1 = 0x00011414;
- } else {
- cfg0 = bw ? 0x000b0b01 : 0x00101001;
- cfg1 = 0x00021414;
- }
- mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
- mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
-
- mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
-}
-
-static void
-mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
-static void
-mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
- switch (band) {
- case NL80211_BAND_2GHZ:
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- case NL80211_BAND_5GHZ:
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- }
-
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-
void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
{
u32 val;
@@ -500,53 +200,6 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
mt76x2_phy_set_gain_val(dev);
}
-static int
-mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
-{
- struct mt76x2_sta *sta;
- struct mt76_wcid *wcid;
- int i, j, min_rssi = 0;
- s8 cur_rssi;
-
- local_bh_disable();
- rcu_read_lock();
-
- for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
- unsigned long mask = dev->wcid_mask[i];
-
- if (!mask)
- continue;
-
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
- if (!(mask & 1))
- continue;
-
- wcid = rcu_dereference(dev->wcid[j]);
- if (!wcid)
- continue;
-
- sta = container_of(wcid, struct mt76x2_sta, wcid);
- spin_lock(&dev->mt76.rx_lock);
- if (sta->inactive_count++ < 5)
- cur_rssi = ewma_signal_read(&sta->rssi);
- else
- cur_rssi = 0;
- spin_unlock(&dev->mt76.rx_lock);
-
- if (cur_rssi < min_rssi)
- min_rssi = cur_rssi;
- }
- }
-
- rcu_read_unlock();
- local_bh_enable();
-
- if (!min_rssi)
- return -75;
-
- return min_rssi;
-}
-
static void
mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
new file mode 100644
index 000000000000..9fd6ab4cbb94
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain -= offset / 2;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain += offset;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+ s8 *gain_adj = dev->cal.rx.high_gain;
+
+ mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+ mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+ mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+ mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
+
+void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+ enum nl80211_band band)
+{
+ u32 pa_mode[2];
+ u32 pa_mode_adj;
+
+ if (band == NL80211_BAND_2GHZ) {
+ pa_mode[0] = 0x010055ff;
+ pa_mode[1] = 0x00550055;
+
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+ } else {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+ }
+ } else {
+ pa_mode[0] = 0x0000ffff;
+ pa_mode[1] = 0x00ff00ff;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+ } else {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+ }
+
+ if (mt76x2_ext_pa_enabled(dev, band))
+ pa_mode_adj = 0x04000000;
+ else
+ pa_mode_adj = 0;
+
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+ }
+
+ mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ u32 val;
+
+ if (band == NL80211_BAND_2GHZ)
+ val = 0x3c3c023c;
+ else
+ val = 0x363c023c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+ } else {
+ if (band == NL80211_BAND_2GHZ) {
+ u32 val = 0x0f3c3c3c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+
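+/* Pack four 6-bit per-rate power values into a single TX_PWR_CFG
+ * register word, one value per byte lane.
+ */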
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+
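+/* Return the lowest non-zero per-rate power value; this becomes the
+ * common base power that all rate offsets are expressed against.
+ */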
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
+ for (i = 0; i < sizeof(r->all); i++) {
+ if (!r->all[i])
+ continue;
+
+ if (ret)
+ ret = min(ret, r->all[i]);
+ else
+ ret = r->all[i];
+ }
+
+ return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+ enum nl80211_chan_width width = dev->mt76.chandef.width;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ int txp_0, txp_1, delta = 0;
+ struct mt76_rate_power t = {};
+ int base_power, gain;
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (width == NL80211_CHAN_WIDTH_40)
+ delta = txp.delta_bw40;
+ else if (width == NL80211_CHAN_WIDTH_80)
+ delta = txp.delta_bw80;
+
+ mt76x2_get_rate_power(dev, &t, chan);
+ mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
+ mt76x2_limit_rate_power(&t, dev->txpower_conf);
+ dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+
+ base_power = mt76x2_get_min_rate_power(&t);
+ delta += base_power - txp.chain[0].target_power;
+ txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+ txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
+ gain = min(txp_0, txp_1);
+ if (gain < 0) {
+ base_power -= gain;
+ txp_0 -= gain;
+ txp_1 -= gain;
+ } else if (gain > 0x2f) {
+ base_power -= gain - 0x2f;
+ txp_0 = 0x2f;
+ txp_1 = 0x2f;
+ }
+
+ mt76x2_add_rate_power_offset(&t, -base_power);
+ dev->target_power = txp.chain[0].target_power;
+ dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+ dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+ dev->rate_power = t;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+
+void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+ enum nl80211_band band, u8 bw)
+{
+ u32 cfg0, cfg1;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ cfg0 = bw ? 0x000b0c01 : 0x00101101;
+ cfg1 = 0x00011414;
+ } else {
+ cfg0 = bw ? 0x000b0b01 : 0x00101001;
+ cfg1 = 0x00021414;
+ }
+ mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+ mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
+
+void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
+
+void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
+
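+/* Scan the WCID allocation bitmap and return the lowest averaged RSSI
+ * among recently active stations (entries idle for five consecutive
+ * polls are skipped); -75 dBm is the fallback when nothing qualifies.
+ */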
+int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
+{
+ struct mt76x2_sta *sta;
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ unsigned long mask = dev->wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->wcid[j]);
+ if (!wcid)
+ continue;
+
+ sta = container_of(wcid, struct mt76x2_sta, wcid);
+ spin_lock(&dev->mt76.rx_lock);
+ if (sta->inactive_count++ < 5)
+ cur_rssi = ewma_signal_read(&sta->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->mt76.rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ if (!min_rssi)
+ return -75;
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
index b9c334d9e5b8..1551ea453180 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -75,6 +75,21 @@
#define MT_XO_CTRL7 0x011c
+#define MT_USB_U3DMA_CFG 0x9018
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
+#define MT_USB_DMA_CFG_WAKE_UP_EN BIT(17)
+#define MT_USB_DMA_CFG_RX_DROP_OR_PAD BIT(18)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
#define MT_WLAN_MTC_CTRL 0x10148
#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
@@ -150,6 +165,9 @@
#define MT_TX_HW_QUEUE_MCU 8
#define MT_TX_HW_QUEUE_MGMT 9
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
#define MT_PBF_SYS_CTRL 0x0400
#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
@@ -202,6 +220,11 @@
#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+#define MT_FCE_SKIP_FS 0x0a6c
+
#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
#define MT_MAC_CSR0 0x1000
@@ -214,6 +237,7 @@
#define MT_MAC_ADDR_DW0 0x1008
#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
#define MT_MAC_BSSID_DW0 0x1010
#define MT_MAC_BSSID_DW1 0x1014
@@ -351,6 +375,7 @@
#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
#define MT_VHT_HT_FBK_CFG1 0x1358
#define MT_PROT_CFG_RATE GENMASK(15, 0)
@@ -425,6 +450,7 @@
#define MT_RX_FILTR_CFG_BAR BIT(15)
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+#define MT_AUTO_RSP_CFG 0x1404
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
@@ -460,6 +486,10 @@
#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
+#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_1 0x1710
+#define MT_TX_STA_2 0x1714
+
#define MT_TX_STAT_FIFO 0x1718
#define MT_TX_STAT_FIFO_VALID BIT(0)
#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
index 560376dd1133..4c907882e8b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -23,129 +23,6 @@ struct beacon_bc_data {
struct sk_buff *tail[8];
};
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x2_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_wcid *wcid = &dev->global_wcid;
-
- if (control->sta) {
- struct mt76x2_sta *msta;
-
- msta = (struct mt76x2_sta *) control->sta->drv_priv;
- wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != -1)
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) vif->drv_priv;
- wcid = &mvif->group_wcid;
- }
-
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ieee80211_free_txskb(mt76_hw(dev), skb);
- } else {
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status(mt76_hw(dev), skb);
- }
-}
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate)
-{
- s8 max_txpwr;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
- if (mcs == 8 || mcs == 9) {
- max_txpwr = dev->rate_power.vht[8];
- } else {
- u8 nss, idx;
-
- nss = ieee80211_rate_get_vht_nss(rate);
- idx = ((nss - 1) << 3) + mcs;
- max_txpwr = dev->rate_power.ht[idx & 0xf];
- }
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
- } else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
-
- if (band == NL80211_BAND_2GHZ) {
- const struct ieee80211_rate *r;
- struct wiphy *wiphy = mt76_hw(dev)->wiphy;
- struct mt76_rate_power *rp = &dev->rate_power;
-
- r = &wiphy->bands[band]->bitrates[rate->idx];
- if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
- max_txpwr = rp->cck[r->hw_value & 0x3];
- else
- max_txpwr = rp->ofdm[r->hw_value & 0x7];
- } else {
- max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
- }
- }
-
- return max_txpwr;
-}
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
- txpwr = min_t(s8, txpwr, dev->txpower_conf);
- txpwr -= (dev->target_power + dev->target_power_delta[0]);
- txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
- if (!dev->enable_tpc)
- return 0;
- else if (txpwr >= 0)
- return min_t(s8, txpwr, 7);
- else
- return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
- s8 txpwr_adj;
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
- dev->rate_power.ofdm[4]);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-
-static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- if (len % 4 == 0)
- return 0;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 2;
-}
-
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -159,7 +36,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
ret = mt76x2_insert_hdr_pad(skb);
if (ret < 0)
@@ -289,7 +166,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+ mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock_bh(&q->lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
new file mode 100644
index 000000000000..36afb166fa3f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x2_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->global_wcid;
+
+ if (control->sta) {
+ struct mt76x2_sta *msta;
+
+ msta = (struct mt76x2_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ /* sw encrypted frames */
+ if (!info->control.hw_key && wcid->hw_key_idx != -1)
+ control->sta = NULL;
+ }
+
+ if (vif && !control->sta) {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *)vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx);
+
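+/* The hardware expects the payload after the 802.11 header to start on
+ * a 4-byte boundary: when the header length is not a multiple of four,
+ * push two zeroed pad bytes between header and payload and return the
+ * number of bytes added.
+ */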
+int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return 0;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+ struct mt76_rate_power *rp = &dev->rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
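+ /* the hardware encoding appears to use 0-7 for positive steps
+ * and 8-15 for negative ones: (txpwr + 32) / 2 maps -16..-1 onto
+ * 8..15, and anything below -16 is clamped to 8
+ */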
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+ dev->rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(mt76_hw(dev), skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(mt76_hw(dev), skb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
new file mode 100644
index 000000000000..1428cfdee579
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt76x2u.h"
+
+static const struct usb_device_id mt76x2u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
+ { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
+ { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+ { USB_DEVICE(0x057c, 0x8503) }, /* AVM FRITZ!WLAN AC860 */
+ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
+ { USB_DEVICE(0x045e, 0x02e6) }, /* Xbox One Wireless Adapter */
+ { },
+};
+
+static int mt76x2u_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev;
+ int err;
+
+ dev = mt76x2u_alloc_device(&intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ err = mt76u_init(&dev->mt76, intf);
+ if (err < 0)
+ goto err;
+
+ dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+ err = mt76x2u_register_device(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ ieee80211_free_hw(mt76_hw(dev));
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+
+ return err;
+}
+
+static void mt76x2u_disconnect(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ set_bit(MT76_REMOVED, &dev->mt76.state);
+ ieee80211_unregister_hw(hw);
+ mt76x2u_cleanup(dev);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+}
+
+static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
+ pm_message_t state)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ mt76u_stop_queues(&dev->mt76);
+ mt76x2u_stop_hw(dev);
+ usb_kill_urb(usb->mcu.res.urb);
+
+ return 0;
+}
+
+static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
+{
+ struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
+ int err;
+
+ reinit_completion(&usb->mcu.cmpl);
+ err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_rx_buffers(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ tasklet_enable(&usb->rx_tasklet);
+ tasklet_enable(&usb->tx_tasklet);
+
+ return mt76x2u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
+MODULE_FIRMWARE(MT7662U_FIRMWARE);
+MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+
+static struct usb_driver mt76x2u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x2u_device_table,
+ .probe = mt76x2u_probe,
+ .disconnect = mt76x2u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt76x2u_suspend,
+ .resume = mt76x2u_resume,
+ .reset_resume = mt76x2u_resume,
+#endif /* CONFIG_PM */
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt76x2u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
new file mode 100644
index 000000000000..008092f0cd8a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_mcu.h"
+
+#define MT7612U_EEPROM_SIZE 512
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x2_dev *dev);
+int mt76x2u_init_hardware(struct mt76x2_dev *dev);
+void mt76x2u_cleanup(struct mt76x2_dev *dev);
+void mt76x2u_stop_hw(struct mt76x2_dev *dev);
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
+int mt76x2u_mac_reset(struct mt76x2_dev *dev);
+void mt76x2u_mac_resume(struct mt76x2_dev *dev);
+int mt76x2u_mac_start(struct mt76x2_dev *dev);
+int mt76x2u_mac_stop(struct mt76x2_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val);
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data);
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force);
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca);
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
+ u8 temp_level, u8 channel);
+int mt76x2u_mcu_init(struct mt76x2_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
+void mt76x2u_stop_queues(struct mt76x2_dev *dev);
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+ u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
new file mode 100644
index 000000000000..1ca5dd05b265
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "dma.h"
+
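+/* Strip the TXWI and USB DMA header prepended at tx time; if the 802.11
+ * header is not 4-byte aligned, also undo the 2-byte alignment pad.
+ */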
+static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
+{
+ int hdr_len;
+
+ skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
+ hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ if (hdr_len % 4) {
+ memmove(skb->data + 2, skb->data, hdr_len);
+ skb_pull(skb, 2);
+ }
+}
+
+static int
+mt76x2u_check_skb_rooms(struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
+ if (hdr_len % 4)
+ need_head += 2;
+ return skb_cow(skb, need_head);
+}
+
+static int
+mt76x2u_set_txinfo(struct sk_buff *skb,
+ struct mt76_wcid *wcid, u8 ep)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mt76x2_qsel qsel;
+ u32 flags;
+
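+ /* rate-probing and HCCA frames go out through the management
+ * queue, presumably so hardware rate control does not override
+ * the probed rate
+ */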
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ ep == MT_EP_OUT_HCCA)
+ qsel = MT_QSEL_MGMT;
+ else
+ qsel = MT_QSEL_EDCA;
+
+ flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ flags |= MT_TXD_INFO_WIV;
+
+ return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
+}
+
+bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_tx_status stat;
+
+ if (!mt76x2_mac_load_tx_status(dev, &stat))
+ return false;
+
+ mt76x2_send_tx_status(dev, &stat, update);
+
+ return true;
+}
+
+int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x2_txwi *txwi;
+ int err, len = skb->len;
+
+ err = mt76x2u_check_skb_rooms(skb);
+ if (err < 0)
+ return -ENOMEM;
+
+ mt76x2_insert_hdr_pad(skb);
+
+ txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+ return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+
+void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ mt76x2u_remove_dma_hdr(e->skb);
+ mt76x2_tx_complete(dev, e->skb);
+}
+
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
new file mode 100644
index 000000000000..9b81e7641c06
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_init_dma(struct mt76x2_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+
+ /* disable AGGR_BULK_RX in order to receive one
+ * frame in each rx urb and avoid copies
+ */
+ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+}
+
+static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);
+
+ mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
+ udelay(1);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
+ usleep_range(150, 200);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
+}
+
+static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+ int shift = unit ? 8 : 0;
+ u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
+
+ /* Enable RF BG */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
+ usleep_range(10, 20);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
+ usleep_range(10, 20);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
+ usleep_range(10, 20);
+
+ mt76x2u_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void mt76x2u_power_on(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
+ MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
+ usleep_range(10, 20);
+
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ usleep_range(10, 20);
+
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);
+
+ /* Release AD/DA power down */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));
+
+ mt76x2u_power_on_rf(dev, 0);
+ mt76x2u_power_on_rf(dev, 1);
+}
+
+static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
+{
+ u32 val, i;
+
+ dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
+ MT7612U_EEPROM_SIZE,
+ GFP_KERNEL);
+ if (!dev->mt76.eeprom.data)
+ return -ENOMEM;
+ dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
+
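+ /* the EEPROM content is exposed through 32bit vendor registers;
+ * copy it out one word at a time
+ */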
+ for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
+ val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
+ put_unaligned_le32(val, dev->mt76.eeprom.data + i);
+ }
+
+ mt76x2_eeprom_parse_hw_cap(dev);
+ return 0;
+}
+
+struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .tx_prepare_skb = mt76x2u_tx_prepare_skb,
+ .tx_complete_skb = mt76x2u_tx_complete_skb,
+ .tx_status_data = mt76x2u_tx_status_data,
+ .rx_skb = mt76x2_queue_rx_skb,
+ };
+ struct mt76x2_dev *dev;
+ struct mt76_dev *mdev;
+
+ mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
+ if (!mdev)
+ return NULL;
+
+ dev = container_of(mdev, struct mt76x2_dev, mt76);
+ mdev->dev = pdev;
+ mdev->drv = &drv_ops;
+
+ mutex_init(&dev->mutex);
+
+ return dev;
+}
+
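+/* Expected probe-time usage, as a rough sketch (error handling and
+ * USB setup omitted):
+ *
+ * dev = mt76x2u_alloc_device(&intf->dev);
+ * err = mt76x2u_register_device(dev);
+ */
+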
+static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
+ mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
+ mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
+ mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
+}
+
+int mt76x2u_init_hardware(struct mt76x2_dev *dev)
+{
+ static const u16 beacon_offsets[] = {
+ /* 512 byte per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ const struct mt76_wcid_addr addr = {
+ .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .ba_mask = 0,
+ };
+ int i, err;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt76x2_reset_wlan(dev, true);
+ mt76x2u_power_on(dev);
+
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ err = mt76x2u_mcu_fw_init(dev);
+ if (err < 0)
+ return err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
+ return -EIO;
+
+ /* wait for the ASIC to be ready after firmware load */
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
+ mt76_wr(dev, MT_TSO_CTRL, 0);
+
+ mt76x2u_init_dma(dev);
+
+ err = mt76x2u_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x2u_mac_reset(dev);
+ if (err < 0)
+ return err;
+
+ mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ mt76x2u_init_beacon_offsets(dev);
+
+ if (!mt76x2_wait_for_bbp(dev))
+ return -ETIMEDOUT;
+
+ /* reset wcid table */
+ for (i = 0; i < 254; i++)
+ mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
+ sizeof(struct mt76_wcid_addr));
+
+ /* reset shared key table and pairwise key table */
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
+ for (i = 0; i < 256; i++)
+ mt76_wr(dev, MT_WCID_ATTR(i), 1);
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
+
+ err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+ if (err < 0)
+ return err;
+
+ mt76x2u_phy_set_rxpath(dev);
+ mt76x2u_phy_set_txdac(dev);
+
+ return mt76x2u_mac_stop(dev);
+}
+
+int mt76x2u_register_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int err;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
+ mt76x2_init_device(dev);
+
+ err = mt76x2u_init_eeprom(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76u_mcu_init_rx(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto fail;
+
+ err = mt76x2u_init_hardware(dev);
+ if (err < 0)
+ goto fail;
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+ ARRAY_SIZE(mt76x2_rates));
+ if (err)
+ goto fail;
+
+ /* check hw sg support in order to enable AMSDU */
+ if (mt76u_check_sg(&dev->mt76))
+ hw->max_tx_fragments = MT_SG_MAX_SIZE;
+ else
+ hw->max_tx_fragments = 1;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ mt76x2_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2u_cleanup(dev);
+ return err;
+}
+
+void mt76x2u_stop_hw(struct mt76x2_dev *dev)
+{
+ mt76u_stop_stat_wk(&dev->mt76);
+ cancel_delayed_work_sync(&dev->cal_work);
+ mt76x2u_mac_stop(dev);
+}
+
+void mt76x2u_cleanup(struct mt76x2_dev *dev)
+{
+ mt76x2u_mcu_set_radio_state(dev, false);
+ mt76x2u_stop_hw(dev);
+ mt76u_queues_deinit(&dev->mt76);
+ mt76x2u_mcu_deinit(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
new file mode 100644
index 000000000000..eab7ab297aa6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
+{
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
+}
+
+static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
+{
+ s8 offset = 0;
+ u16 eep_val;
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
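+ /* the trim value is stored as sign-magnitude: bit7 holds the sign,
+ * bits 0-6 the magnitude; 0xff means the field is not programmed
+ */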
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
+ MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);
+
+ mt76_wr(dev, 0x504, 0x06000000);
+ mt76_wr(dev, 0x50c, 0x08800000);
+ mdelay(5);
+ mt76_wr(dev, 0x504, 0x0);
+
+ /* decrease SIFS from 16us to 13us */
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);
+
+ /* init FCE */
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
+int mt76x2u_mac_reset(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
+
+ /* init PBF registers */
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+
+ mt76_write_mac_initvals(dev);
+
+ mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
+ mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
+ mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
+ mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
+
+ mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
+ mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
+ mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76x2u_mac_fixup_xtal(dev);
+
+ return 0;
+}
+
+int mt76x2u_mac_start(struct mt76x2_dev *dev)
+{
+ mt76x2u_mac_reset_counters(dev);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ return 0;
+}
+
+int mt76x2u_mac_stop(struct mt76x2_dev *dev)
+{
+ int i, count = 0, val;
+ bool stopped = false;
+ u32 rts_cfg;
+
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return -EIO;
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
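+ /* drop the RTS retry limit while stopping; it is restored at the
+ * end of this routine
+ */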
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+
+ /* wait for TX DMA to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ /* wait for the TX queue page counters to drain */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
+ break;
+ usleep_range(10, 20);
+ }
+
+ /* disable TX/RX */
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 1000; i++) {
+ if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
+ !mt76_rr(dev, MT_BBP(IBI, 12))) {
+ stopped = true;
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ if (!stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ /* wait for the RX queue page counters to drain */
+ for (i = 0; i < 200; i++) {
+ if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
+ !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
+ !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
+ ++count > 10)
+ break;
+ msleep(50);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
+ dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");
+
+ /* wait for RX DMA to stop */
+ for (i = 0; i < 2000; i++) {
+ val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
+ if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
+ break;
+ usleep_range(50, 100);
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+
+ return 0;
+}
+
+void mt76x2u_mac_resume(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
+}
+
+void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
+{
+ ether_addr_copy(dev->mt76.macaddr, addr);
+
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->mt76.macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
new file mode 100644
index 000000000000..7367ba111119
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+
+static int mt76x2u_start(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x2u_mac_start(dev);
+ if (ret)
+ goto out;
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt76x2u_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x2u_stop_hw(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt76x2u_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
+ unsigned int idx = 0;
+
+ if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
+ mt76x2u_mac_setaddr(dev, vif->addr);
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = MT_VIF_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+ mt76x2_txq_init(dev, vif->txq);
+
+ return 0;
+}
+
+static int
+mt76x2u_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int err;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_set_channel(&dev->mt76);
+
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
+ mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
+ mt76x2_mac_stop(dev, false);
+
+ err = mt76x2u_phy_set_channel(dev, chandef);
+
+ mt76x2u_mac_resume(dev);
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+ mt76_txq_schedule_all(&dev->mt76);
+
+ return err;
+}
+
+static void
+mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt76x2u_phy_channel_calibrate(dev);
+ mt76x2_apply_gain_adj(dev);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt76_wr(dev, MT_MAC_BSSID_DW0,
+ get_unaligned_le32(info->bssid));
+ mt76_wr(dev, MT_MAC_BSSID_DW1,
+ get_unaligned_le16(info->bssid + 4));
+ }
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int err = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ err = mt76x2u_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ mt76x2_phy_set_txpower(dev);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return err;
+}
+
+static void
+mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt76x2u_ops = {
+ .tx = mt76x2_tx,
+ .start = mt76x2u_start,
+ .stop = mt76x2u_stop,
+ .add_interface = mt76x2u_add_interface,
+ .remove_interface = mt76x2_remove_interface,
+ .sta_add = mt76x2_sta_add,
+ .sta_remove = mt76x2_sta_remove,
+ .set_key = mt76x2_set_key,
+ .ampdu_action = mt76x2_ampdu_action,
+ .config = mt76x2u_config,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .bss_info_changed = mt76x2u_bss_info_changed,
+ .configure_filter = mt76x2_configure_filter,
+ .conf_tx = mt76x2_conf_tx,
+ .sw_scan_start = mt76x2u_sw_scan,
+ .sw_scan_complete = mt76x2u_sw_scan_complete,
+ .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
new file mode 100644
index 000000000000..22c16d638baa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+#define MT_CMD_HDR_LEN 4
+#define MT_INBAND_PACKET_MAX_LEN 192
+#define MT_MCU_MEMMAP_WLAN 0x410000
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3900
+#define MCU_ROM_PATCH_MAX_PAYLOAD 2048
+
+#define MT76U_MCU_ILM_OFFSET 0x80000
+#define MT76U_MCU_DLM_OFFSET 0x110000
+#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
+
+static int
+mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
+ func != Q_SELECT);
+}
+
+int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
+{
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
+ false);
+}
+
+int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ struct sk_buff *skb;
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
+}
+
+int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->chainmask),
+ };
+ struct sk_buff *skb;
+
+ /* first set the channel without the extension channel info */
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+
+ usleep_range(5000, 10000);
+
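+ /* then rerun the command with the extension channel info filled in */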
+ msg.ext_chan = 0xe0 + bw_index;
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+
+int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 val)
+{
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(val),
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+ struct sk_buff *skb;
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
+}
+
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca)
+{
+ struct {
+ __le32 channel;
+ __le32 rssi_val;
+ __le32 false_cca_val;
+ } __packed __aligned(4) msg = {
+ .rssi_val = cpu_to_le32(rssi),
+ .false_cca_val = cpu_to_le32(false_cca),
+ };
+ struct sk_buff *skb;
+ u32 val = channel;
+
+ if (ap)
+ val |= BIT(31);
+ if (ext)
+ val |= BIT(30);
+ msg.channel = cpu_to_le32(val);
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
+}
+
+int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+ struct sk_buff *skb;
+
+ skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+}
+
+static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
+{
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x12, 0, NULL, 0);
+}
+
+static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ const u8 data[] = {
+ 0x6f, 0xfc, 0x08, 0x01,
+ 0x20, 0x04, 0x00, 0x00,
+ 0x00, 0x09, 0x00,
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+ u8 data[] = {
+ 0x6f, 0xfc, 0x05, 0x01,
+ 0x07, 0x01, 0x00, 0x04
+ };
+
+ memcpy(usb->data, data, sizeof(data));
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_CLASS,
+ 0x12, 0, usb->data, sizeof(data));
+}
+
+static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
+{
+ bool rom_protect = !is_mt7612(dev);
+ struct mt76x2_patch_header *hdr;
+ u32 val, patch_mask, patch_reg;
+ const struct firmware *fw;
+ int err;
+
+ if (rom_protect &&
+ !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ return 0;
+ }
+
+ err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "failed to load firmware\n");
+ err = -EIO;
+ goto out;
+ }
+
+ hdr = (struct mt76x2_patch_header *)fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ fw->size - sizeof(*hdr),
+ MCU_ROM_PATCH_MAX_PAYLOAD,
+ MT76U_MCU_ROM_PATCH_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_enable_patch(dev);
+ mt76x2u_mcu_reset_wmt(dev);
+ mdelay(20);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
+ dev_err(dev->mt76.dev, "failed to load ROM patch\n");
+ err = -ETIMEDOUT;
+ }
+
+out:
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return err;
+}
+
+static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
+{
+ u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
+ const struct mt76x2_fw_header *hdr;
+ int err, len, ilm_len, dlm_len;
+ const struct firmware *fw;
+
+ err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+ if (err < 0)
+ return err;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt76x2_fw_header *)fw->data;
+ ilm_len = le32_to_cpu(hdr->ilm_len);
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ len = sizeof(*hdr) + ilm_len + dlm_len;
+ if (fw->size != len) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ /* vendor reset */
+ mt76u_mcu_fw_reset(&dev->mt76);
+ usleep_range(5000, 10000);
+
+ /* enable USB_DMA_CFG */
+ val = MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
+ mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
+
+ /* load ILM */
+ err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT76U_MCU_ILM_OFFSET);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ /* load DLM */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ dlm_offset += 0x800;
+ err = mt76u_mcu_fw_send_data(&dev->mt76,
+ fw->data + sizeof(*hdr) + ilm_len,
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ dlm_offset);
+ if (err < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ mt76x2u_mcu_load_ivb(dev);
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
+ dev_err(dev->mt76.dev, "firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
+ /* enable FCE to send in-band cmd */
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ dev_dbg(dev->mt76.dev, "firmware running\n");
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_load_rom_patch(dev);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_load_firmware(dev);
+}
+
+int mt76x2u_mcu_init(struct mt76x2_dev *dev)
+{
+ int err;
+
+ err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+ if (err < 0)
+ return err;
+
+ return mt76x2u_mcu_set_radio_state(dev, true);
+}
+
+void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
+{
+ struct mt76_usb *usb = &dev->mt76.usb;
+
+ usb_kill_urb(usb->mcu.res.urb);
+ mt76u_buf_free(&usb->mcu.res);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
new file mode 100644
index 000000000000..5158063d0c2e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2u.h"
+#include "mt76x2_eeprom.h"
+
+void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~BIT(4);
+
+ switch (dev->chainmask & 0xf) {
+ case 2:
+ val |= BIT(3);
+ break;
+ default:
+ val &= ~BIT(3);
+ break;
+ }
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
+{
+ int txpath;
+
+ txpath = (dev->chainmask >> 8) & 0xf;
+ switch (txpath) {
+ case 2:
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ default:
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ }
+}
+
+void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ mt76x2u_mac_stop(dev);
+
+ if (is_5ghz)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+
+ mt76x2u_mac_resume(dev);
+}
+
+static void
+mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
+
+ if (!dev->cal.tssi_cal_done)
+ return;
+
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2u_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
+
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
+
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2u_mcu_tssi_comp(dev, &t);
+
+ if (t.pa_mode || dev->cal.dpd_cal_done)
+ return;
+
+ usleep_range(10000, 20000);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+ dev->cal.dpd_cal_done = true;
+ }
+}
+
+static void
+mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+ u8 channel = dev->mt76.chandef.chan->hw_value;
+ int freq, freq1;
+ u32 false_cca;
+
+ freq = dev->mt76.chandef.chan->center_freq;
+ freq1 = dev->mt76.chandef.center_freq1;
+
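+ /* adjust the channel number to the center of the configured
+ * bandwidth before passing it to the dynamic vga command
+ */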
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80: {
+ int ch_group_index;
+
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ channel += 6 - ch_group_index * 4;
+ break;
+ }
+ case NL80211_CHAN_WIDTH_40:
+ if (freq1 > freq)
+ channel += 2;
+ else
+ channel -= 2;
+ break;
+ default:
+ break;
+ }
+
+ dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+ mt76_rr(dev, MT_RX_STAT_1));
+
+ mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
+ dev->cal.avg_rssi_all, false_cca);
+}
+
+void mt76x2u_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x2_dev *dev;
+
+ dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ mt76x2u_phy_tssi_compensate(dev);
+ mt76x2u_phy_update_channel_gain(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ struct ieee80211_channel *chan = chandef->chan;
+ u8 channel = chan->hw_value, bw, bw_index;
+ int ch_group_index, freq, freq1, ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, chan->band);
+ mt76x2_configure_tx_delay(dev, chan->band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101b3f);
+
+ mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(8));
+
+ if (scan)
+ return 0;
+
+ if (mt76x2_tssi_enabled(dev)) {
+ /* init default values for temp compensation */
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ 0x38);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ 0x38);
+
+ /* init TSSI calibration */
+ if (!mt76x2_channel_silent(dev)) {
+ struct ieee80211_channel *chan;
+ u32 flag = 0;
+
+ chan = dev->mt76.chandef.chan;
+ if (chan->band == NL80211_BAND_5GHZ)
+ flag |= BIT(0);
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+ mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ }
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e96956710fb2..af48d43bb7dc 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -51,7 +51,7 @@ __mt76_get_txwi(struct mt76_dev *dev)
return t;
}
-static struct mt76_txwi_cache *
+struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
@@ -91,80 +91,6 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
return txq->ac;
}
-int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
-{
- struct mt76_queue_entry e;
- struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
- struct sk_buff *iter;
- dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
-
- t = mt76_get_txwi(dev);
- if (!t) {
- ieee80211_free_txskb(dev->hw, skb);
- return -ENOMEM;
- }
-
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
-
- len = skb->len - skb->data_len;
- addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
- goto free;
- }
-
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
-
- skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
- goto unmap;
-
- addr = dma_map_single(dev->dev, iter->data, iter->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
- goto unmap;
-
- buf[n].addr = addr;
- buf[n++].len = iter->len;
- }
-
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
- goto unmap;
-
- return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
-
-unmap:
- ret = -ENOMEM;
- for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
-
-free:
- e.skb = skb;
- e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
- mt76_put_txwi(dev, t);
- return ret;
-}
-EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
-
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -185,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
q = &dev->q_tx[qid];
spin_lock_bh(&q->lock);
- mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8)
@@ -241,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
info->flags |= IEEE80211_TX_STATUS_EOSP;
mt76_skb_set_moredata(skb, !last);
- mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}
void
@@ -321,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
if (idx < 0)
return idx;
@@ -356,7 +282,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (cur_ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+ txq->sta);
if (idx < 0)
return idx;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 000000000000..7780b07543bb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "usb_trace.h"
+#include "dma.h"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+/* should be called with usb_ctrl_mtx locked */
+static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+ int i, ret;
+
+ pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
+ : usb_sndctrlpipe(udev, 0);
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return -EIO;
+
+ ret = usb_control_msg(udev, pipe, req, req_type, val,
+ offset, buf, len, MT_VEND_REQ_TOUT_MS);
+ if (ret == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+ usleep_range(5000, 10000);
+ }
+
+ dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+ return ret;
+}
+
+int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
+ u8 req_type, u16 val, u16 offset,
+ void *buf, size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_vendor_request(dev, req, req_type,
+ val, offset, buf, len);
+ trace_usb_reg_wr(dev, offset, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_vendor_request);
+
+/* should be called with usb_ctrl_mtx locked */
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 data = ~0;
+ u16 offset;
+ int ret;
+ u8 req;
+
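+ /* the high address bits select the access method: EEPROM and CFG
+ * space use dedicated vendor requests, everything else a plain
+ * multi-read
+ */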
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_EEPROM:
+ req = MT_VEND_READ_EEPROM;
+ break;
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_READ_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_READ;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, offset, usb->data, sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = __mt76u_rr(dev, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+/* should be called with usb_ctrl_mtx locked */
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u16 offset;
+ u8 req;
+
+ switch (addr & MT_VEND_TYPE_MASK) {
+ case MT_VEND_TYPE_CFG:
+ req = MT_VEND_WRITE_CFG;
+ break;
+ default:
+ req = MT_VEND_MULTI_WRITE;
+ break;
+ }
+ offset = addr & ~MT_VEND_TYPE_MASK;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR, 0,
+ offset, usb->data, sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= __mt76u_rr(dev, addr) & ~mask;
+ __mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ const u32 *val = data;
+ int i, ret;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ for (i = 0; i < (len / 4); i++) {
+ put_unaligned_le32(val[i], usb->data);
+ ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0, offset + i * 4, usb->data,
+ sizeof(__le32));
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
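+ /* a 32bit write is split in two 16bit vendor requests: the value
+ * travels in wValue, the register offset in wIndex
+ */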
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val & 0xffff, offset, NULL, 0);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ val >> 16, offset + 2, NULL, 0);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+EXPORT_SYMBOL_GPL(mt76u_single_wr);
+
+static int
+mt76u_set_endpoints(struct usb_interface *intf,
+ struct mt76_usb *usb)
+{
+ struct usb_host_interface *intf_desc = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, in_ep = 0, out_ep = 0;
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ in_ep < __MT_EP_IN_MAX) {
+ usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
+ usb->in_max_packet = usb_endpoint_maxp(ep_desc);
+ in_ep++;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ out_ep < __MT_EP_OUT_MAX) {
+ usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
+ usb->out_max_packet = usb_endpoint_maxp(ep_desc);
+ out_ep++;
+ }
+ }
+
+ if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
+ for (i = 0; i < nsgs; i++) {
+ struct page *page;
+ void *data;
+ int offset;
+
+ data = netdev_alloc_frag(len);
+ if (!data)
+ break;
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ sg_set_page(&urb->sg[i], page, sglen, offset);
+ }
+
+ if (i < nsgs) {
+ int j;
+
+ for (j = nsgs; j < urb->num_sgs; j++)
+ skb_free_frag(sg_virt(&urb->sg[j]));
+ urb->num_sgs = i;
+ }
+
+ urb->num_sgs = max_t(int, i, urb->num_sgs);
+ buf->len = urb->num_sgs * sglen;
+ sg_init_marker(urb->sg, urb->num_sgs);
+
+ return i ? : -ENOMEM;
+}
+
+int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
+ int nsgs, int len, int sglen, gfp_t gfp)
+{
+ buf->urb = usb_alloc_urb(0, gfp);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ gfp);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+
+ sg_init_table(buf->urb->sg, nsgs);
+ buf->dev = dev;
+
+ return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
+
+void mt76u_buf_free(struct mt76u_buf *buf)
+{
+ struct urb *urb = buf->urb;
+ int i;
+
+ for (i = 0; i < urb->num_sgs; i++)
+ skb_free_frag(sg_virt(&urb->sg[i]));
+ usb_free_urb(buf->urb);
+}
+EXPORT_SYMBOL_GPL(mt76u_buf_free);
+
+int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
+ struct mt76u_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned int pipe;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
+ else
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
+
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
+ complete_fn, context);
+
+ return usb_submit_urb(buf->urb, gfp);
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_buf);
+
+static inline struct mt76u_buf *
+mt76u_get_next_rx_entry(struct mt76_queue *q)
+{
+ struct mt76u_buf *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (q->queued > 0) {
+ buf = &q->entry[q->head].ubuf;
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return buf;
+}
+
+static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+{
+ u16 dma_len, min_len;
+
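+ /* the first two bytes of each rx buffer carry the DMA length;
+ * it must be non-zero, 4-byte aligned and fit in the urb
+ */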
+ dma_len = get_unaligned_le16(data);
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
+ MT_FCE_INFO_LEN;
+
+ if (data_len < min_len || WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return -EINVAL;
+ return dma_len;
+}
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ u8 *data = sg_virt(&urb->sg[0]);
+ int data_len, len, nsgs = 1;
+ struct sk_buff *skb;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ return 0;
+
+ len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ if (len < 0)
+ return 0;
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb)
+ return 0;
+
+ data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ if (skb->tail + data_len > skb->end) {
+ dev_kfree_skb(skb);
+ return 1;
+ }
+
+ __skb_put(skb, data_len);
+ len -= data_len;
+
+ while (len > 0) {
+ data_len = min_t(int, len, urb->sg[nsgs].length);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ sg_page(&urb->sg[nsgs]),
+ urb->sg[nsgs].offset,
+ data_len, q->buf_size);
+ len -= data_len;
+ nsgs++;
+ }
+ dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+
+ return nsgs;
+}
+
+static void mt76u_complete_rx(struct urb *urb)
+{
+ struct mt76_dev *dev = urb->context;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ goto out;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued++;
+ tasklet_schedule(&dev->usb.rx_tasklet);
+out:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int err, nsgs, buf_len = q->buf_size;
+ struct mt76u_buf *buf;
+
+ rcu_read_lock();
+
+ while (true) {
+ buf = mt76u_get_next_rx_entry(q);
+ if (!buf)
+ break;
+
+ nsgs = mt76u_process_rx_entry(dev, buf->urb);
+ if (nsgs > 0) {
+ err = mt76u_fill_rx_sg(dev, buf, nsgs,
+ buf_len,
+ SKB_WITH_OVERHEAD(buf_len));
+ if (err < 0)
+ break;
+ }
+ mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ buf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ }
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+ rcu_read_unlock();
+}
+
+int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned long flags;
+ int i, err = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
+ &q->entry[i].ubuf, GFP_ATOMIC,
+ mt76u_complete_rx, dev);
+ if (err < 0)
+ break;
+ }
+ q->head = q->tail = 0;
+ q->queued = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
+
+static int mt76u_alloc_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, err, nsgs;
+
+ spin_lock_init(&q->lock);
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ if (mt76u_check_sg(dev)) {
+ q->buf_size = MT_RX_BUF_SIZE;
+ nsgs = MT_SG_MAX_SIZE;
+ } else {
+ q->buf_size = PAGE_SIZE;
+ nsgs = 1;
+ }
+
+ for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
+ err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
+ nsgs, q->buf_size,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+ }
+ q->ndesc = MT_NUM_RX_ENTRIES;
+
+ return mt76u_submit_rx_buffers(dev);
+}
+
+static void mt76u_free_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ mt76u_buf_free(&q->entry[i].ubuf);
+}
+
+static void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ usb_kill_urb(q->entry[i].ubuf.urb);
+}
+
+int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 info, pad;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
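+ *
+ * e.g. a 23B command is sent as a 4B TXINFO with LEN = 24 (payload
+ * rounded to 4B), the 23B payload, 1B of padding and the 4B zero
+ * trailer.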
+ */
+ info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (unlikely(pad)) {
+ if (__skb_pad(last, pad, true))
+ return -ENOMEM;
+ __skb_put(last, pad);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+
+static void mt76u_tx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ bool wake;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+
+ spin_lock_bh(&q->lock);
+ while (true) {
+ buf = &q->entry[q->head].ubuf;
+ if (!buf->done || !q->queued)
+ break;
+
+ dev->drv->tx_complete_skb(dev, q,
+ &q->entry[q->head],
+ false);
+
+ if (q->entry[q->head].schedule) {
+ q->entry[q->head].schedule = false;
+ q->swq_queued--;
+ }
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+ }
+ mt76_txq_schedule(dev, q);
+ wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ if (!q->queued)
+ wake_up(&dev->tx_wait);
+
+ spin_unlock_bh(&q->lock);
+
+ if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw,
+ &dev->usb.stat_work,
+ msecs_to_jiffies(10));
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, i);
+ }
+}
+
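+/* poll tx status events from the device until none are left, then
+ * re-arm the delayed work if the interface is still running
+ */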
+static void mt76u_tx_status_data(struct work_struct *work)
+{
+ struct mt76_usb *usb;
+ struct mt76_dev *dev;
+ u8 update = 1;
+ u16 count = 0;
+
+ usb = container_of(work, struct mt76_usb, stat_work.work);
+ dev = container_of(usb, struct mt76_dev, usb);
+
+ while (true) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ break;
+
+ if (!dev->drv->tx_status_data(dev, &update))
+ break;
+ count++;
+ }
+
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
+ ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
+ msecs_to_jiffies(10));
+ else
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+
+static void mt76u_complete_tx(struct urb *urb)
+{
+ struct mt76u_buf *buf = urb->context;
+ struct mt76_dev *dev = buf->dev;
+
+ if (mt76u_urb_error(urb))
+ dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
+ buf->done = true;
+
+ tasklet_schedule(&dev->usb.tx_tasklet);
+}
+
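+/* map the skb and all of its fragments into the urb scatterlist,
+ * capped at MT_SG_MAX_SIZE entries
+ */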
+static int
+mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
+{
+ int nsgs = 1 + skb_shinfo(skb)->nr_frags;
+ struct sk_buff *iter;
+
+ skb_walk_frags(skb, iter)
+ nsgs += 1 + skb_shinfo(iter)->nr_frags;
+
+ memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
+
+ nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
+ sg_init_marker(urb->sg, nsgs);
+ urb->num_sgs = nsgs;
+
+ return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
+}
+
+static int
+mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ u8 ep = q2ep(q->hw_idx);
+ struct mt76u_buf *buf;
+ u16 idx = q->tail;
+ unsigned int pipe;
+ int err;
+
+ if (q->queued == q->ndesc)
+ return -ENOSPC;
+
+ err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ if (err < 0)
+ return err;
+
+ buf = &q->entry[idx].ubuf;
+ buf->done = false;
+
+ err = mt76u_tx_build_sg(skb, buf->urb);
+ if (err < 0)
+ return err;
+
+ pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
+ usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
+ mt76u_complete_tx, buf);
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->entry[idx].skb = skb;
+ q->queued++;
+
+ return idx;
+}
+
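+/* submit all urbs queued between 'first' and 'tail' to the bulk endpoint */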
+static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct mt76u_buf *buf;
+ int err;
+
+ while (q->first != q->tail) {
+ buf = &q->entry[q->first].ubuf;
+ err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ if (err < 0) {
+ if (err == -ENODEV)
+ set_bit(MT76_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "tx urb submit failed:%d\n",
+ err);
+ break;
+ }
+ q->first = (q->first + 1) % q->ndesc;
+ }
+}
+
+static int mt76u_alloc_tx(struct mt76_dev *dev)
+{
+ struct mt76u_buf *buf;
+ struct mt76_queue *q;
+ size_t size;
+ int i, j;
+
+ size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->swq);
+ q->hw_idx = q2hwq(i);
+
+ q->entry = devm_kzalloc(dev->dev,
+ MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->ndesc = MT_NUM_TX_ENTRIES;
+ for (j = 0; j < q->ndesc; j++) {
+ buf = &q->entry[j].ubuf;
+ buf->dev = dev;
+
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!buf->urb)
+ return -ENOMEM;
+
+ buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!buf->urb->sg)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void mt76u_free_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_free_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+static void mt76u_stop_tx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = &dev->q_tx[i];
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].ubuf.urb);
+ }
+}
+
+void mt76u_stop_queues(struct mt76_dev *dev)
+{
+ tasklet_disable(&dev->usb.rx_tasklet);
+ tasklet_disable(&dev->usb.tx_tasklet);
+
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_queues);
+
+void mt76u_stop_stat_wk(struct mt76_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->usb.stat_work);
+ clear_bit(MT76_READING_STATS, &dev->state);
+}
+EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+
+void mt76u_queues_deinit(struct mt76_dev *dev)
+{
+ mt76u_stop_queues(dev);
+
+ mt76u_free_rx(dev);
+ mt76u_free_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
+
+int mt76u_alloc_queues(struct mt76_dev *dev)
+{
+ int err;
+
+ err = mt76u_alloc_rx(dev);
+ if (err < 0)
+ goto err;
+
+ err = mt76u_alloc_tx(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+err:
+ mt76u_queues_deinit(dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
+
+static const struct mt76_queue_ops usb_queue_ops = {
+ .tx_queue_skb = mt76u_tx_queue_skb,
+ .kick = mt76u_tx_kick,
+};
+
+int mt76u_init(struct mt76_dev *dev,
+ struct usb_interface *intf)
+{
+ static const struct mt76_bus_ops mt76u_ops = {
+ .rr = mt76u_rr,
+ .wr = mt76u_wr,
+ .rmw = mt76u_rmw,
+ .copy = mt76u_copy,
+ };
+ struct mt76_usb *usb = &dev->usb;
+
+ tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+ INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
+ skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+
+ init_completion(&usb->mcu.cmpl);
+ mutex_init(&usb->mcu.mutex);
+
+ mutex_init(&usb->usb_ctrl_mtx);
+ dev->bus = &mt76u_ops;
+ dev->queue_ops = &usb_queue_ops;
+
+ return mt76u_set_endpoints(intf, usb);
+}
+EXPORT_SYMBOL_GPL(mt76u_init);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
new file mode 100644
index 000000000000..070be803d463
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+
+#include "mt76.h"
+#include "dma.h"
+
+#define MT_CMD_HDR_LEN 4
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_CMD_HDR_LEN);
+ skb_put_data(skb, data, len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
+
+void mt76u_mcu_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
+
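+/* wait for the MCU response matching @seq: retry up to five times,
+ * 300ms each, resubmitting the response buffer after every event
+ */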
+static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+ struct mt76_usb *usb = &dev->usb;
+ struct mt76u_buf *buf = &usb->mcu.res;
+ int i, ret;
+ u32 rxfce;
+
+ for (i = 0; i < 5; i++) {
+ if (!wait_for_completion_timeout(&usb->mcu.cmpl,
+ msecs_to_jiffies(300)))
+ continue;
+
+ if (buf->urb->status)
+ return -EIO;
+
+ rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
+ ret = mt76u_submit_buf(dev, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (ret)
+ return ret;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
+ return 0;
+
+ dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76_usb *usb = &dev->usb;
+ unsigned int pipe;
+ int ret, sent;
+ u8 seq = 0;
+ u32 info;
+
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&usb->mcu.mutex);
+
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
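+ /* seq 0 is reserved for commands that do not wait for a response */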
+ if (wait_resp) {
+ seq = ++usb->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++usb->mcu.msg_seq & 0xf;
+ }
+
+ info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ MT_MCU_MSG_TYPE_CMD;
+ ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
+ if (ret)
+ goto out;
+
+ ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt76u_mcu_wait_resp(dev, seq);
+
+out:
+ mutex_unlock(&usb->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
+
+void mt76u_mcu_fw_reset(struct mt76_dev *dev)
+{
+ mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
+
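+/* send one firmware chunk: a 4B DMA header, the payload and a 4B zero
+ * trailer, with the destination address and length programmed through
+ * the FCE registers beforehand
+ */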
+static int
+__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
+ const void *fw_data, int len, u32 dst_addr)
+{
+ u8 *data = sg_virt(&buf->urb->sg[0]);
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ __le32 info;
+ u32 val;
+ int err;
+
+ info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, len) |
+ MT_MCU_MSG_TYPE_CMD);
+
+ memcpy(data, &info, sizeof(info));
+ memcpy(data + sizeof(info), fw_data, len);
+ memset(data + sizeof(info) + len, 0, 4);
+
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ len = roundup(len, 4);
+ mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+
+ buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
+ err = mt76u_submit_buf(dev, USB_DIR_OUT,
+ MT_EP_OUT_INBAND_CMD,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb, &cmpl);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_timeout(&cmpl,
+ msecs_to_jiffies(1000))) {
+ dev_err(dev->dev, "firmware upload timed out\n");
+ usb_kill_urb(buf->urb);
+ return -ETIMEDOUT;
+ }
+
+ if (mt76u_urb_error(buf->urb)) {
+ dev_err(dev->dev, "firmware upload failed: %d\n",
+ buf->urb->status);
+ return buf->urb->status;
+ }
+
+ val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset)
+{
+ int err, len, pos = 0, max_len = max_payload - 8;
+ struct mt76u_buf buf;
+
+ err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ while (data_len > 0) {
+ len = min_t(int, data_len, max_len);
+ err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
+ len, offset + pos);
+ if (err < 0)
+ break;
+
+ data_len -= len;
+ pos += len;
+ usleep_range(5000, 10000);
+ }
+ mt76u_buf_free(&buf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
+
+int mt76u_mcu_init_rx(struct mt76_dev *dev)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int err;
+
+ err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
+ MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (err < 0)
+ mt76u_buf_free(&usb->mcu.res);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 000000000000..7e1f540f0b7a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "usb_trace.h"
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 000000000000..52db7012304a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_USB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76_usb
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE usb_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index d3b611aaf061..faea99b7a445 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -603,6 +603,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 7b21016012c3..0f1789020960 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -308,6 +308,17 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int ret;
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 656ddc659218..4aa332f4646b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -843,6 +843,88 @@ static int qtnf_set_mac_acl(struct wiphy *wiphy,
return ret;
}
+static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
+ QLINK_PM_OFF, timeout);
+ if (ret)
+ pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ if (!wowlan) {
+ pr_debug("WoWLAN triggers are not enabled\n");
+ qtnf_virtual_intf_cleanup(vif->netdev);
+ goto exit;
+ }
+
+ qtnf_scan_done(vif->mac, true);
+
+ ret = qtnf_cmd_send_wowlan_set(vif, wowlan);
+ if (ret) {
+ pr_err("MAC%u: failed to set WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int qtnf_resume(struct wiphy *wiphy)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_vif *vif;
+ int ret = 0;
+
+ vif = qtnf_mac_get_base_vif(mac);
+ if (!vif) {
+ pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = qtnf_cmd_send_wowlan_set(vif, NULL);
+ if (ret) {
+ pr_err("MAC%u: failed to reset WoWLAN triggers\n",
+ mac->macid);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct qtnf_bus *bus = mac->bus;
+
+ device_set_wakeup_enable(bus->dev, enabled);
+}
+#endif
+
static struct cfg80211_ops qtn_cfg80211_ops = {
.add_virtual_intf = qtnf_add_virtual_intf,
.change_virtual_intf = qtnf_change_virtual_intf,
@@ -869,6 +951,12 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.channel_switch = qtnf_channel_switch,
.start_radar_detection = qtnf_start_radar_detection,
.set_mac_acl = qtnf_set_mac_acl,
+ .set_power_mgmt = qtnf_set_power_mgmt,
+#ifdef CONFIG_PM
+ .suspend = qtnf_suspend,
+ .resume = qtnf_resume,
+ .set_wakeup = qtnf_set_wakeup,
+#endif
};
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -921,6 +1009,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
qtn_cfg80211_ops.start_radar_detection = NULL;
+ if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+ qtn_cfg80211_ops.set_power_mgmt = NULL;
+
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
@@ -975,7 +1066,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->retry_long = macinfo->lretry_limit;
wiphy->coverage_class = macinfo->coverage_class;
- wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH;
+ wiphy->max_scan_ssids =
+ (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
@@ -994,6 +1086,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
@@ -1016,6 +1109,11 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+#ifdef CONFIG_PM
+ if (macinfo->wowlan)
+ wiphy->wowlan = macinfo->wowlan;
+#endif
+
if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 42a598f92539..ae9e77300533 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -1092,6 +1092,9 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
case QTN_TLV_ID_UBOOT_VER:
uboot_ver = (const void *)tlv->val;
break;
+ case QTN_TLV_ID_MAX_SCAN_SSIDS:
+ hwinfo->max_scan_ssids = *tlv->val;
+ break;
default:
break;
}
@@ -1135,6 +1138,37 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
return 0;
}
+static void
+qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
+ const struct qlink_wowlan_capab_data *wowlan)
+{
+ struct qtnf_mac_info *mac_info = &mac->macinfo;
+ const struct qlink_wowlan_support *data1;
+ struct wiphy_wowlan_support *supp;
+
+ supp = kzalloc(sizeof(*supp), GFP_KERNEL);
+ if (!supp)
+ return;
+
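+ /* only version 1 of the WoWLAN capability layout is supported */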
+ switch (le16_to_cpu(wowlan->version)) {
+ case 0x1:
+ data1 = (struct qlink_wowlan_support *)wowlan->data;
+
+ supp->flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT;
+ supp->n_patterns = le32_to_cpu(data1->n_patterns);
+ supp->pattern_max_len = le32_to_cpu(data1->pattern_max_len);
+ supp->pattern_min_len = le32_to_cpu(data1->pattern_min_len);
+
+ mac_info->wowlan = supp;
+ break;
+ default:
+ pr_warn("MAC%u: unsupported WoWLAN version 0x%x\n",
+ mac->macid, le16_to_cpu(wowlan->version));
+ kfree(supp);
+ break;
+ }
+}
+
static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const u8 *tlv_buf, size_t tlv_buf_size)
{
@@ -1144,6 +1178,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const struct qlink_iface_comb_num *comb_num;
const struct qlink_iface_limit_record *rec;
const struct qlink_iface_limit *lim;
+ const struct qlink_wowlan_capab_data *wowlan;
u16 rec_len;
u16 tlv_type;
u16 tlv_value_len;
@@ -1252,7 +1287,31 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
ext_capa_mask = (u8 *)tlv->val;
ext_capa_mask_len = tlv_value_len;
break;
+ case QTN_TLV_ID_WOWLAN_CAPAB:
+ if (tlv_value_len < sizeof(*wowlan))
+ return -EINVAL;
+
+ wowlan = (void *)tlv->val;
+ if (!le16_to_cpu(wowlan->len)) {
+ pr_warn("MAC%u: skip empty WoWLAN data\n",
+ mac->macid);
+ break;
+ }
+
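+ /* the TLV must cover the header plus the advertised payload exactly */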
+ rec_len = sizeof(*wowlan) + le16_to_cpu(wowlan->len);
+ if (unlikely(tlv_value_len != rec_len)) {
+ pr_warn("MAC%u: WoWLAN data size mismatch\n",
+ mac->macid);
+ return -EINVAL;
+ }
+
+ kfree(mac->macinfo.wowlan);
+ mac->macinfo.wowlan = NULL;
+ qtnf_parse_wowlan_info(mac, wowlan);
+ break;
default:
+ pr_warn("MAC%u: unknown TLV type %u\n",
+ mac->macid, tlv_type);
break;
}
@@ -2260,11 +2319,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
int count = 0;
int ret;
- if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
- pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
- return -EINVAL;
- }
-
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
sizeof(struct qlink_cmd));
@@ -2799,3 +2853,93 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
return ret;
}
+
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_pm_set *cmd;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_PM_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_pm_set *)cmd_skb->data;
+ cmd->pm_mode = pm_mode;
+ cmd->pm_standby_timer = cpu_to_le32(timeout);
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
+
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ struct qlink_cmd_wowlan_set *cmd;
+ u32 triggers = 0;
+ int count = 0;
+ int ret = 0;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_WOWLAN_SET, sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ qtnf_bus_lock(bus);
+
+ cmd = (struct qlink_cmd_wowlan_set *)cmd_skb->data;
+
+ if (wowl) {
+ if (wowl->disconnect)
+ triggers |= QLINK_WOWLAN_TRIG_DISCONNECT;
+
+ if (wowl->magic_pkt)
+ triggers |= QLINK_WOWLAN_TRIG_MAGIC_PKT;
+
+ if (wowl->n_patterns && wowl->patterns) {
+ triggers |= QLINK_WOWLAN_TRIG_PATTERN_PKT;
+ while (count < wowl->n_patterns) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb,
+ QTN_TLV_ID_WOWLAN_PATTERN,
+ wowl->patterns[count].pattern,
+ wowl->patterns[count].pattern_len);
+ count++;
+ }
+ }
+ }
+
+ cmd->triggers = cpu_to_le32(triggers);
+
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ pr_err("cmd exec failed: 0x%.4X\n", res_code);
+ ret = -EFAULT;
+ }
+
+out:
+ qtnf_bus_unlock(bus);
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index cf9274add26d..1ac41156c192 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,8 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
u32 cac_time_ms);
int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
const struct cfg80211_acl_data *params);
+int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
+int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
+ const struct cfg80211_wowlan *wowl);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index c318340e1bd5..19abbc4e23e0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -495,6 +495,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
qtnf_mac_iface_comb_free(mac);
kfree(mac->macinfo.extended_capabilities);
kfree(mac->macinfo.extended_capabilities_mask);
+ kfree(mac->macinfo.wowlan);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 214435448335..a1e338a1f055 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -40,7 +40,6 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
-#define QTNF_MAX_SSID_LIST_LENGTH 2
#define QTNF_MAX_VSIE_LEN 255
#define QTNF_MAX_INTF 8
#define QTNF_MAX_EVENT_QUEUE_LEN 255
@@ -111,6 +110,7 @@ struct qtnf_mac_info {
u8 *extended_capabilities;
u8 *extended_capabilities_mask;
u8 extended_capabilities_len;
+ struct wiphy_wowlan_support *wowlan;
};
struct qtnf_chan_stats {
@@ -145,6 +145,7 @@ struct qtnf_hw_info {
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
+ u8 max_scan_ssids;
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 4a32967d0479..99d37e3efba6 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -77,6 +77,7 @@ enum qlink_hw_capab {
QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
+ QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
};
enum qlink_iface_type {
@@ -256,6 +257,8 @@ enum qlink_cmd_type {
QLINK_CMD_CHAN_STATS = 0x0054,
QLINK_CMD_CONNECT = 0x0060,
QLINK_CMD_DISCONNECT = 0x0061,
+ QLINK_CMD_PM_SET = 0x0062,
+ QLINK_CMD_WOWLAN_SET = 0x0063,
};
/**
@@ -668,6 +671,54 @@ struct qlink_acl_data {
struct qlink_mac_address mac_addrs[0];
} __packed;
+/**
+ * enum qlink_pm_mode - Power Management mode
+ *
+ * @QLINK_PM_OFF: normal mode, no power saving enabled
+ * @QLINK_PM_AUTO_STANDBY: enable auto power save mode
+ */
+enum qlink_pm_mode {
+ QLINK_PM_OFF = 0,
+ QLINK_PM_AUTO_STANDBY = 1,
+};
+
+/**
+ * struct qlink_cmd_pm_set - data for QLINK_CMD_PM_SET command
+ *
+ * @pm_standby_timer: period of network inactivity in seconds before
+ * putting a radio in power save mode
+ * @pm_mode: power management mode
+ */
+struct qlink_cmd_pm_set {
+ struct qlink_cmd chdr;
+ __le32 pm_standby_timer;
+ u8 pm_mode;
+} __packed;
+
+/**
+ * enum qlink_wowlan_trigger
+ *
+ * @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
+ * @QLINK_WOWLAN_TRIG_MAGIC_PKT: wakeup on magic packet
+ * @QLINK_WOWLAN_TRIG_PATTERN_PKT: wakeup on user-defined packet
+ */
+enum qlink_wowlan_trigger {
+ QLINK_WOWLAN_TRIG_DISCONNECT = BIT(0),
+ QLINK_WOWLAN_TRIG_MAGIC_PKT = BIT(1),
+ QLINK_WOWLAN_TRIG_PATTERN_PKT = BIT(2),
+};
+
+/**
+ * struct qlink_cmd_wowlan_set - data for QLINK_CMD_WOWLAN_SET command
+ *
+ * @triggers: requested bitmask of WoWLAN triggers
+ */
+struct qlink_cmd_wowlan_set {
+ struct qlink_cmd chdr;
+ __le32 triggers;
+ u8 data[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -1065,6 +1116,8 @@ struct qlink_event_radar {
* @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
* &struct qlink_sta_stats. Valid values are marked as such in a bitmap
* carried by QTN_TLV_ID_STA_STATS_MAP.
+ * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
+ * for in any given scan.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1093,6 +1146,9 @@ enum qlink_tlv_id {
QTN_TLV_ID_CALIBRATION_VER = 0x0406,
QTN_TLV_ID_UBOOT_VER = 0x0407,
QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
+ QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
+ QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
+ QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
};
struct qlink_tlv_hdr {
@@ -1380,4 +1436,33 @@ struct qlink_random_mac_addr {
u8 mac_addr_mask[ETH_ALEN];
} __packed;
+/**
+ * struct qlink_wowlan_capab_data - data for QTN_TLV_ID_WOWLAN_CAPAB TLV
+ *
+ * WoWLAN capabilities supported by cards.
+ *
+ * @version: version of WoWLAN data structure, to ensure backward
+ * compatibility for firmwares with limited WoWLAN support
+ * @len: Total length of WoWLAN data
+ * @data: supported WoWLAN features
+ */
+struct qlink_wowlan_capab_data {
+ __le16 version;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct qlink_wowlan_support - supported WoWLAN capabilities
+ *
+ * @n_patterns: number of supported wakeup patterns
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ */
+struct qlink_wowlan_support {
+ __le32 n_patterns;
+ __le32 pattern_max_len;
+ __le32 pattern_min_len;
+} __packed;
+
#endif /* _QTN_QLINK_H_ */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index c380c1f56ba6..fa2fd64084ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -527,24 +527,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
-int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
-
-int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
-
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index a7e0a17aa7e8..08c607c031bc 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -470,7 +470,6 @@ static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
static int ray_init(struct net_device *dev)
{
int i;
- UCHAR *p;
struct ccs __iomem *pccs;
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
@@ -513,12 +512,9 @@ static int ray_init(struct net_device *dev)
init_startup_params(local);
/* copy mac address to startup parameters */
- if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
- p = local->sparm.b4.a_mac_addr;
- } else {
+ if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
memcpy(&local->sparm.b4.a_mac_addr,
&local->startup_res.station_addr, ADDRLEN);
- p = local->sparm.b4.a_mac_addr;
}
clear_interrupt(local); /* Clear any interrupt from the card */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index fde89866fa8d..51e32df6120b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -363,7 +363,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
- rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+ rtl8187se_rf_writereg(dev, 0x02, 0x088D); msleep(221);
rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
@@ -386,7 +386,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
- rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); msleep(31);
rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 0761e61591bd..27e6baf12e90 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -26,6 +26,9 @@ static struct ta_metadata metadata_flash_content[] = {
{"flash_content", 0x00010000},
{"rsi/rs9113_wlan_qspi.rps", 0x00010000},
{"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000},
+ {"flash_content", 0x00010000},
+ {"rsi/rs9113_ap_bt_dual_mode.rps", 0x00010000},
};
int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
@@ -246,7 +249,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
}
}
- data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff);
+ data_desc->mac_flags |= cpu_to_le16(seq_num & 0xfff);
data_desc->qid_tid = ((skb->priority & 0xf) |
((tx_params->tid & 0xf) << 4));
data_desc->sta_id = tx_params->sta_id;
@@ -842,7 +845,6 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
const struct firmware *fw_entry = NULL;
u32 regout_val = 0, content_size;
u16 tmp_regout_val = 0;
- u8 *flash_content = NULL;
struct ta_metadata *metadata_p;
int status;
@@ -904,28 +906,22 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
__func__, metadata_p->name);
return status;
}
- flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
- if (!flash_content) {
- rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__);
- status = -EIO;
- goto fail;
- }
content_size = fw_entry->size;
rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
/* Get the firmware version */
common->lmac_ver.ver.info.fw_ver[0] =
- flash_content[LMAC_VER_OFFSET] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
common->lmac_ver.ver.info.fw_ver[1] =
- flash_content[LMAC_VER_OFFSET + 1] & 0xFF;
- common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
+ common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
common->lmac_ver.release_num =
- flash_content[LMAC_VER_OFFSET + 3] & 0xFF;
- common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
+ common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
common->lmac_ver.patch_num = 0;
rsi_print_version(common);
- status = bl_write_header(adapter, flash_content, content_size);
+ status = bl_write_header(adapter, (u8 *)fw_entry->data, content_size);
if (status) {
rsi_dbg(ERR_ZONE,
"%s: RPS Image header loading failed\n",
@@ -967,7 +963,7 @@ fw_upgrade:
rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n");
- status = auto_fw_upgrade(adapter, flash_content, content_size);
+ status = auto_fw_upgrade(adapter, (u8 *)fw_entry->data, content_size);
if (status == 0) {
rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n");
goto load_image_cmd;
@@ -981,13 +977,11 @@ fw_upgrade:
success:
rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n");
- kfree(flash_content);
release_firmware(fw_entry);
return 0;
fail:
rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n");
- kfree(flash_content);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 2ca7464b7fa3..4e510cbe0a89 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -416,7 +416,8 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
/* Get free vap index */
for (i = 0; i < RSI_MAX_VIFS; i++) {
- if (!adapter->vifs[i]) {
+ if (!adapter->vifs[i] ||
+ !memcmp(vif->addr, adapter->vifs[i]->addr, ETH_ALEN)) {
vap_idx = i;
break;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 1485a0c89df2..01d99ed985ee 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -122,7 +122,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
u8 extended_desc)
{
struct ieee80211_tx_info *info;
- struct skb_info *rx_params;
struct sk_buff *skb = NULL;
u8 payload_offset;
struct ieee80211_vif *vif;
@@ -149,10 +148,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
vif = rsi_get_vif(common->priv, wh->addr1);
info = IEEE80211_SKB_CB(skb);
- rx_params = (struct skb_info *)info->driver_data;
- rx_params->rssi = rsi_get_rssi(buffer);
- rx_params->channel = rsi_get_connected_channel(vif);
-
return skb;
}
@@ -336,7 +331,6 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
spin_lock_init(&adapter->ps_lock);
timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
init_completion(&common->wlan_init_completion);
- common->init_done = true;
adapter->device_model = RSI_DEV_9113;
common->oper_mode = oper_mode;
@@ -374,6 +368,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
}
#endif
+ common->init_done = true;
return adapter;
err:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index d0e5937cad6d..1095df7d9573 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -334,20 +334,17 @@ static int rsi_load_radio_caps(struct rsi_common *common)
struct ieee80211_conf *conf = &hw->conf;
if (conf_is_ht40_plus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_LOWER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(LOWER_20_ENABLE |
+ (LOWER_20_ENABLE >> 12));
} else if (conf_is_ht40_minus(conf)) {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
- radio_caps->radio_info =
- RSI_CMDDESC_UPPER_20_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16(UPPER_20_ENABLE |
+ (UPPER_20_ENABLE >> 12));
} else {
- radio_caps->radio_cfg_info =
- RSI_CMDDESC_40MHZ;
- radio_caps->radio_info =
- RSI_CMDDESC_FULL_40_ENABLE;
+ radio_caps->ppe_ack_rate =
+ cpu_to_le16((BW_40MHZ << 12) |
+ FULL40M_ENABLE);
}
}
}
@@ -749,7 +746,7 @@ int rsi_hal_load_key(struct rsi_common *common,
key_descriptor |= RSI_CIPHER_TKIP;
}
key_descriptor |= RSI_PROTECT_DATA_FRAMES;
- key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK);
+ key_descriptor |= (key_id << RSI_KEY_ID_OFFSET);
rsi_set_len_qno(&set_key->desc_dword0.len_qno,
(frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 416981d99229..5733e440ecaf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1394,10 +1394,7 @@ static const struct dev_pm_ops rsi_pm_ops = {
#endif
static const struct sdio_device_id rsi_dev_table[] = {
- { SDIO_DEVICE(0x303, 0x100) },
- { SDIO_DEVICE(0x041B, 0x0301) },
- { SDIO_DEVICE(0x041B, 0x0201) },
- { SDIO_DEVICE(0x041B, 0x9330) },
+ { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 6ce6b754df12..c0a163e40402 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -835,11 +835,7 @@ static int rsi_resume(struct usb_interface *intf)
#endif
static const struct usb_device_id rsi_dev_table[] = {
- { USB_DEVICE(0x0303, 0x0100) },
- { USB_DEVICE(0x041B, 0x0301) },
- { USB_DEVICE(0x041B, 0x0201) },
- { USB_DEVICE(0x041B, 0x9330) },
- { USB_DEVICE(0x1618, 0x9113) },
+ { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 14620935c925..359fbdf85739 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -22,7 +22,7 @@
#include "rsi_main.h"
#define MAX_MGMT_PKT_SIZE 512
-#define RSI_NEEDED_HEADROOM 80
+#define RSI_NEEDED_HEADROOM 84
#define RSI_RCV_BUFFER_LEN 2000
#define RSI_11B_MODE 0
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 353dbdf31e75..66dcd2ec9051 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,6 +28,9 @@
#include <linux/mmc/sdio_ids.h>
#include "rsi_main.h"
+#define RSI_SDIO_VID_9113 0x041B
+#define RSI_SDIO_PID_9113 0x9330
+
enum sdio_interrupt_type {
BUFFER_FULL = 0x0,
BUFFER_AVAILABLE = 0x2,
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index b6fe79f0a513..5b2eddd1a2ee 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,6 +22,9 @@
#include "rsi_main.h"
#include "rsi_common.h"
+#define RSI_USB_VID_9113 0x1618
+#define RSI_USB_PID_9113 0x9113
+
#define USB_INTERNAL_REG_1 0x25000
#define RSI_USB_READY_MAGIC_NUM 0xab
#define FW_STATUS_REG 0x41050012
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 37f785f601c1..89b0d0fade9f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6160,16 +6160,16 @@ static int wl1271_register_hw(struct wl1271 *wl)
}
if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
- wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
+ wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
if (!strcmp(pdev_data->family->name, "wl18xx")) {
- wl1271_warning("This default nvs file can be removed from the file system\n");
+ wl1271_warning("This default nvs file can be removed from the file system");
} else {
- wl1271_warning("Your device performance is not optimized.\n");
- wl1271_warning("Please use the calibrator tool to configure your device.\n");
+ wl1271_warning("Your device performance is not optimized.");
+ wl1271_warning("Please use the calibrator tool to configure your device.");
}
if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
- wl1271_warning("Fuse mac address is zero. using random mac\n");
+ wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
nic_addr = get_random_int();
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f8..078a4940bc5c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
- u8 beacon)
+ u8 beacon, u8 probe_rsp)
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
}
+ if (beacon || probe_rsp)
+ status->boottime_ns = ktime_get_boot_ns();
+
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (ieee80211_is_data_present(hdr->frame_control))
is_data = 1;
- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+ ieee80211_is_probe_resp(hdr->frame_control));
wlcore_hw_set_rx_csum(wl, desc, skb);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a27daa23c9dc..3621e05a7494 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1603,9 +1603,9 @@ static void xenvif_ctrl_action(struct xenvif *vif)
static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
- return 1;
+ return true;
- return 0;
+ return false;
}
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4d88aa394273..799cba4624a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,6 +87,7 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
struct netfront_stats {
@@ -1331,6 +1332,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
xenbus_switch_state(dev, XenbusStateInitialising);
+ wait_event(module_load_q,
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateClosed &&
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateUnknown);
return netdev;
exit: